diff --git a/Dockerfile.openshift b/Dockerfile.openshift
index 73e309105..deec7c97c 100644
--- a/Dockerfile.openshift
+++ b/Dockerfile.openshift
@@ -14,13 +14,19 @@ COPY controllers/ controllers/
COPY pkg/ pkg/
COPY vendor/ vendor/
COPY bindata/manifests/ bindata/manifests/
+COPY test/ test/
# Build
RUN CGO_ENABLED=0 GO111MODULE=on go build -a -mod=vendor -o manager main.go
+# Build extended tests
+RUN make -C test build-e2e-tests && \
+ gzip test/bin/ingress-node-firewall-tests
+
FROM registry.ci.openshift.org/ocp/4.22:base-rhel9
WORKDIR /
COPY --from=builder /workspace/manager .
COPY --from=builder /workspace/bindata/manifests /bindata/manifests
+COPY --from=builder /workspace/test/bin/ingress-node-firewall-tests.gz /usr/bin/
ENTRYPOINT ["/manager"]
diff --git a/Makefile b/Makefile
index 628b17588..f7440d8e4 100644
--- a/Makefile
+++ b/Makefile
@@ -480,3 +480,12 @@ podman-build-daemon: ## Build the daemon image with podman. To change location,
.PHONY: podman-push-daemon
podman-push-daemon: ## Push the daemon image with podman. To change location, specify DAEMON_IMG=.
podman push ${DAEMON_IMG}
+
+##@ Extended Tests (OTE)
+.PHONY: build-e2e-tests
+build-e2e-tests: ## Build the extended e2e test binary for OpenShift
+ $(MAKE) -C test build-e2e-tests
+
+.PHONY: clean-e2e-tests
+clean-e2e-tests: ## Clean the extended e2e test artifacts
+ $(MAKE) -C test clean
diff --git a/go.mod b/go.mod
index 33ee76c11..043d4f3ff 100644
--- a/go.mod
+++ b/go.mod
@@ -13,10 +13,13 @@ require (
github.com/google/gopacket v1.1.19
github.com/kennygrant/sanitize v1.2.4
github.com/onsi/ginkgo v1.16.5
+ github.com/onsi/ginkgo/v2 v2.23.3
github.com/onsi/gomega v1.37.0
+ github.com/openshift-eng/openshift-tests-extension v0.0.0-20251218142942-7ecc8801b9df
github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.22.0
github.com/prometheus/common v0.63.0
+ github.com/spf13/cobra v1.8.1
github.com/vishvananda/netlink v1.3.1-0.20250206174618-62fb240731fa
golang.org/x/sys v0.32.0
gopkg.in/mcuadros/go-syslog.v2 v2.3.0
@@ -32,8 +35,10 @@ require (
)
require (
+ cel.dev/expr v0.18.0 // indirect
github.com/Masterminds/goutils v1.1.1 // indirect
github.com/Masterminds/semver v1.5.0 // indirect
+ github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
@@ -47,16 +52,20 @@ require (
github.com/go-openapi/jsonpointer v0.21.0 // indirect
github.com/go-openapi/jsonreference v0.21.0 // indirect
github.com/go-openapi/swag v0.23.0 // indirect
+ github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/btree v1.1.3 // indirect
+ github.com/google/cel-go v0.22.0 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/go-cmp v0.7.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
+ github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/huandu/xstrings v1.3.2 // indirect
github.com/imdario/mergo v0.3.16 // indirect
+ github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
@@ -72,6 +81,7 @@ require (
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/spf13/pflag v1.0.5 // indirect
+ github.com/stoewer/go-strcase v1.3.0 // indirect
github.com/vishvananda/netns v0.0.4 // indirect
github.com/x448/float16 v0.8.4 // indirect
go.opentelemetry.io/otel v1.32.0 // indirect
@@ -79,13 +89,17 @@ require (
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
golang.org/x/crypto v0.36.0 // indirect
+ golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
golang.org/x/net v0.37.0 // indirect
- golang.org/x/oauth2 v0.25.0 // indirect
+ golang.org/x/oauth2 v0.27.0 // indirect
golang.org/x/sync v0.12.0 // indirect
golang.org/x/term v0.30.0 // indirect
golang.org/x/text v0.23.0 // indirect
golang.org/x/time v0.7.0 // indirect
+ golang.org/x/tools v0.30.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d // indirect
google.golang.org/protobuf v1.36.5 // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
@@ -97,3 +111,34 @@ require (
sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect
sigs.k8s.io/yaml v1.4.0 // indirect
)
+
+// Replace directives for OTE framework
+replace (
+ github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12
+ k8s.io/api => k8s.io/api v0.32.3
+ k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.32.3
+ k8s.io/apimachinery => k8s.io/apimachinery v0.32.3
+ k8s.io/apiserver => k8s.io/apiserver v0.32.3
+ k8s.io/cli-runtime => k8s.io/cli-runtime v0.32.3
+ k8s.io/client-go => k8s.io/client-go v0.32.3
+ k8s.io/cloud-provider => k8s.io/cloud-provider v0.32.3
+ k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.32.3
+ k8s.io/code-generator => k8s.io/code-generator v0.32.3
+ k8s.io/component-base => k8s.io/component-base v0.32.3
+ k8s.io/component-helpers => k8s.io/component-helpers v0.32.3
+ k8s.io/controller-manager => k8s.io/controller-manager v0.32.3
+ k8s.io/cri-api => k8s.io/cri-api v0.32.3
+ k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.32.3
+ k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.32.3
+ k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.32.3
+ k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.32.3
+ k8s.io/kube-proxy => k8s.io/kube-proxy v0.32.3
+ k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.32.3
+ k8s.io/kubectl => k8s.io/kubectl v0.32.3
+ k8s.io/kubelet => k8s.io/kubelet v0.32.3
+ k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.32.3
+ k8s.io/metrics => k8s.io/metrics v0.32.3
+ k8s.io/mount-utils => k8s.io/mount-utils v0.32.3
+ k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.32.3
+ k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.32.3
+)
diff --git a/go.sum b/go.sum
index 50df3ef57..5ac0aac34 100644
--- a/go.sum
+++ b/go.sum
@@ -1,9 +1,13 @@
+cel.dev/expr v0.18.0 h1:CJ6drgk+Hf96lkLikr4rFf19WrU0BOWEihyZnI2TAzo=
+cel.dev/expr v0.18.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw=
github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=
github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
github.com/Masterminds/sprig v2.22.0+incompatible h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZCSYp4Z0m2dk6cEM60=
github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o=
+github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
+github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
@@ -16,6 +20,7 @@ github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UF
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cilium/ebpf v0.18.0 h1:OsSwqS4y+gQHxaKgg2U/+Fev834kdnsQbtzRnbVC6Gs=
github.com/cilium/ebpf v0.18.0/go.mod h1:vmsAT73y4lW2b4peE+qcOqw6MxvWQdC+LiU5gd/xyo4=
+github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
@@ -45,7 +50,6 @@ github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+Gr
github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
github.com/go-quicktest/qt v1.101.1-0.20240301121107-c6c8733fa1e6 h1:teYtXy9B7y5lHTp8V9KPxpYRAVA7dozigQcMiBust1s=
github.com/go-quicktest/qt v1.101.1-0.20240301121107-c6c8733fa1e6/go.mod h1:p4lGIVX+8Wa6ZPNDvqcxq36XpUDLh42FLetFU7odllI=
-github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
@@ -64,6 +68,8 @@ github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
+github.com/google/cel-go v0.22.0 h1:b3FJZxpiv1vTMo2/5RDUqAHPxkT8mmMfJIrq1llbf7g=
+github.com/google/cel-go v0.22.0/go.mod h1:BuznPXXfQDpXKWQ9sPW3TzlAJN5zzFe+i9tIs0yC4s8=
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -88,6 +94,8 @@ github.com/huandu/xstrings v1.3.2 h1:L18LIDzqlW6xN2rEkpdV8+oL/IXWJ1APd+vsdYy4Wdw
github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
+github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
+github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/josharian/native v1.1.0 h1:uuaP0hAbW7Y4l0ZRQ6C9zfb7Mg1mbFKry/xzDAfmtLA=
@@ -136,12 +144,14 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
-github.com/onsi/ginkgo/v2 v2.23.3 h1:edHxnszytJ4lD9D5Jjc4tiDkPBZ3siDeJJkUZJJVkp0=
-github.com/onsi/ginkgo/v2 v2.23.3/go.mod h1:zXTP6xIp3U8aVuXN8ENK9IXRaTjFnpVB9mGmaSRvxnM=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.37.0 h1:CdEG8g0S133B4OswTDC/5XPSzE1OeP29QOioj2PID2Y=
github.com/onsi/gomega v1.37.0/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0=
+github.com/openshift-eng/openshift-tests-extension v0.0.0-20251218142942-7ecc8801b9df h1:/KiCxPFpkZN4HErfAX5tyhn6G3ziPFbkGswHVAZKY5Q=
+github.com/openshift-eng/openshift-tests-extension v0.0.0-20251218142942-7ecc8801b9df/go.mod h1:6gkP5f2HL0meusT0Aim8icAspcD1cG055xxBZ9yC68M=
+github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12 h1:AKx/w1qpS8We43bsRgf8Nll3CGlDHpr/WAXvuedTNZI=
+github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241205171354-8006f302fd12/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
@@ -157,11 +167,21 @@ github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0leargg
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
+github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs=
+github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/vishvananda/netlink v1.3.1-0.20250206174618-62fb240731fa h1:iAhToRwOrdk+pKzclvLM7nKZhsg8f7dVrgkFccDUbUw=
@@ -187,6 +207,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
+golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
+golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
@@ -199,8 +221,8 @@ golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c=
golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
-golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70=
-golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
+golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M=
+golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -242,6 +264,10 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw=
gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
+google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 h1:YcyjlL1PRr2Q17/I0dPk2JmYS5CDXfcdb2Z3YRioEbw=
+google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d h1:xJJRGY7TJcvIlpSrN3K6LAWgNFUILlO+OMAqtg9aqnw=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d/go.mod h1:3ENsm/5D1mzDyhpzeRi1NR784I0BcofWBoSc5QqqMK4=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -265,6 +291,7 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls=
diff --git a/test/Makefile b/test/Makefile
new file mode 100644
index 000000000..b2ebb2714
--- /dev/null
+++ b/test/Makefile
@@ -0,0 +1,47 @@
+# test/Makefile - Build targets for ingress-node-firewall extended tests
+
+SHELL := /bin/bash
+
+# Binary name
+BINARY_NAME := ingress-node-firewall-tests
+
+# Build directory
+BUILD_DIR := bin
+BINARY_PATH := $(BUILD_DIR)/$(BINARY_NAME)
+
+# Go build flags
+GO := go
+GOFLAGS ?=
+LDFLAGS := -w -s
+
+# Version information
+VERSION ?= $(shell git describe --tags --always --dirty 2>/dev/null || echo "unknown")
+GIT_COMMIT ?= $(shell git rev-parse HEAD 2>/dev/null || echo "unknown")
+BUILD_DATE ?= $(shell date -u +"%Y-%m-%dT%H:%M:%SZ")
+
+# LDFLAGS with version info
+LDFLAGS += -X github.com/openshift/ingress-node-firewall/test/version.Version=$(VERSION)
+LDFLAGS += -X github.com/openshift/ingress-node-firewall/test/version.GitCommit=$(GIT_COMMIT)
+LDFLAGS += -X github.com/openshift/ingress-node-firewall/test/version.BuildDate=$(BUILD_DATE)
+
+.PHONY: all
+all: build-e2e-tests
+
+.PHONY: build-e2e-tests
+build-e2e-tests: ## Build the extended e2e test binary (static, ART compliant)
+ @echo "Building $(BINARY_NAME)..."
+ @mkdir -p $(BUILD_DIR)
+ CGO_ENABLED=0 GO_COMPLIANCE_POLICY="exempt_all" $(GO) build \
+ -a -mod=vendor \
+ -ldflags "$(LDFLAGS)" \
+ -o $(BINARY_PATH) ./cmd/main.go
+ @echo "Built $(BINARY_PATH)"
+
+.PHONY: clean
+clean: ## Clean build artifacts
+ @echo "Cleaning test build artifacts..."
+ @rm -rf $(BUILD_DIR)
+
+.PHONY: help
+help: ## Display this help
+	@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n  make \033[36m<target>\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf "  \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
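Note: the `-X` flags above point at a `test/version` package that is not part of this diff; for the link-time injection to take effect, that package presumably looks like this minimal sketch (package path and variable names taken from the LDFLAGS above):

```go
// Sketch of the assumed test/version package; -X can only overwrite
// package-level string variables, so each field is a plain string var.
package version

var (
	Version   = "unknown" // set via -X ...test/version.Version=$(VERSION)
	GitCommit = "unknown" // set via -X ...test/version.GitCommit=$(GIT_COMMIT)
	BuildDate = "unknown" // set via -X ...test/version.BuildDate=$(BUILD_DATE)
)
```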
diff --git a/test/bin/ingress-node-firewall-tests b/test/bin/ingress-node-firewall-tests
new file mode 100755
index 000000000..90e25e60f
Binary files /dev/null and b/test/bin/ingress-node-firewall-tests differ
diff --git a/test/cmd/main.go b/test/cmd/main.go
new file mode 100644
index 000000000..e574b148c
--- /dev/null
+++ b/test/cmd/main.go
@@ -0,0 +1,71 @@
+package main
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/spf13/cobra"
+
+ // OTE library imports
+ "github.com/openshift-eng/openshift-tests-extension/pkg/cmd"
+ e "github.com/openshift-eng/openshift-tests-extension/pkg/extension"
+ g "github.com/openshift-eng/openshift-tests-extension/pkg/ginkgo"
+
+ // Import test suites (blank import to register Ginkgo tests)
+ _ "github.com/openshift/ingress-node-firewall/test/e2e/operator"
+)
+
+func main() {
+ // 1. Create registry and extension
+ registry := e.NewRegistry()
+ ext := e.NewExtension("openshift", "payload", "ingress-node-firewall")
+
+ // 2. Add test suites with qualifiers
+ ext.AddSuite(e.Suite{
+ Name: "openshift/ingress-node-firewall/conformance/parallel",
+ Parents: []string{"openshift/conformance/parallel"},
+		Qualifiers: []string{`name.contains("[sig-network] INFW") && !labels.exists(l, l=="Serial") && !labels.exists(l, l=="Slow")`},
+ })
+
+ ext.AddSuite(e.Suite{
+ Name: "openshift/ingress-node-firewall/conformance/serial",
+ Parents: []string{"openshift/conformance/serial"},
+		Qualifiers: []string{`name.contains("[sig-network] INFW") && labels.exists(l, l=="Serial")`},
+ })
+
+ ext.AddSuite(e.Suite{
+ Name: "openshift/ingress-node-firewall/optional/slow",
+ Parents: []string{},
+		Qualifiers: []string{`name.contains("[sig-network] INFW") && labels.exists(l, l=="Slow")`},
+ })
+
+ ext.AddSuite(e.Suite{
+ Name: "openshift/ingress-node-firewall/all",
+ Parents: []string{},
+		Qualifiers: []string{`name.contains("[sig-network] INFW")`},
+ })
+
+ // 3. Build test specs from Ginkgo suite
+ specs, err := g.BuildExtensionTestSpecsFromOpenShiftGinkgoSuite()
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "error: couldn't build extension test specs from ginkgo: %v\n", err)
+ os.Exit(1)
+ }
+
+ // 4. Add specs to extension and register
+ ext.AddSpecs(specs)
+ registry.Register(ext)
+
+ // 5. Create root command with OTE commands
+ rootCmd := &cobra.Command{
+ Use: "ingress-node-firewall-tests",
+ Short: "OpenShift extended tests for ingress-node-firewall",
+ Long: "This binary contains extended e2e tests for ingress-node-firewall operator",
+ }
+
+ rootCmd.AddCommand(cmd.DefaultExtensionCommands(registry)...)
+
+ if err := rootCmd.Execute(); err != nil {
+ os.Exit(1)
+ }
+}
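For a spec to land in the serial suite registered above, its name must contain `[sig-network] INFW` and it must carry a `Serial` Ginkgo label. A minimal sketch (hypothetical spec, assuming the ginkgo/v2 `Label` decorator):

```go
package operator

import (
	g "github.com/onsi/ginkgo/v2"
)

// Hypothetical spec: the Describe text feeds the name.contains() qualifier,
// and Label("Serial") is what labels.exists(l, l=="Serial") keys on.
var _ = g.Describe("[sig-network] INFW", g.Label("Serial"), func() {
	g.It("reloads rules one node at a time", func() {
		// test body elided
	})
})
```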
diff --git a/test/e2e/cli.go b/test/e2e/cli.go
new file mode 100644
index 000000000..147defb2d
--- /dev/null
+++ b/test/e2e/cli.go
@@ -0,0 +1,53 @@
+package e2e
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "os/exec"
+ "strings"
+)
+
+// OCClient provides helper methods for executing oc commands
+type OCClient struct {
+ kubeconfig string
+}
+
+// NewOCClient creates a new OCClient
+func NewOCClient(kubeconfig string) *OCClient {
+ if kubeconfig == "" {
+ kubeconfig = GetKubeconfig()
+ }
+ return &OCClient{
+ kubeconfig: kubeconfig,
+ }
+}
+
+// Run executes an oc command and returns the output
+func (c *OCClient) Run(ctx context.Context, args ...string) (string, error) {
+ cmdArgs := append([]string{"--kubeconfig", c.kubeconfig}, args...)
+ cmd := exec.CommandContext(ctx, "oc", cmdArgs...)
+
+ var stdout, stderr bytes.Buffer
+ cmd.Stdout = &stdout
+ cmd.Stderr = &stderr
+
+ err := cmd.Run()
+ if err != nil {
+ return "", fmt.Errorf("command failed: %v, stderr: %s", err, stderr.String())
+ }
+
+ return strings.TrimSpace(stdout.String()), nil
+}
+
+// Get gets a resource
+func (c *OCClient) Get(ctx context.Context, resourceType, name, namespace string) (string, error) {
+ args := []string{"get", resourceType}
+ if name != "" {
+ args = append(args, name)
+ }
+ if namespace != "" {
+ args = append(args, "-n", namespace)
+ }
+ return c.Run(ctx, args...)
+}
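A minimal usage sketch of the client defined above (assumes `oc` on PATH and a reachable cluster; the namespace is the operator namespace used by the tests below):

```go
package main

import (
	"context"
	"fmt"

	e2e "github.com/openshift/ingress-node-firewall/test/e2e"
)

func main() {
	ctx := context.Background()
	oc := e2e.NewOCClient("") // empty path falls back to KUBECONFIG / ~/.kube/config

	// Equivalent to: oc --kubeconfig <path> get pods -n openshift-ingress-node-firewall
	out, err := oc.Get(ctx, "pods", "", "openshift-ingress-node-firewall")
	if err != nil {
		fmt.Println("oc get failed:", err)
		return
	}
	fmt.Println(out)
}
```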
diff --git a/test/e2e/operator/operator.go b/test/e2e/operator/operator.go
new file mode 100644
index 000000000..59557f8e7
--- /dev/null
+++ b/test/e2e/operator/operator.go
@@ -0,0 +1,72 @@
+package operator
+
+import (
+ "context"
+ "fmt"
+ "strings"
+ "time"
+
+ g "github.com/onsi/ginkgo/v2"
+ o "github.com/onsi/gomega"
+
+ e2e "github.com/openshift/ingress-node-firewall/test/e2e"
+)
+
+var _ = g.Describe("[sig-network] INFW", func() {
+ var (
+ oc *e2e.OCClient
+ ctx context.Context
+ cancel context.CancelFunc
+ opNamespace = "openshift-ingress-node-firewall"
+ )
+
+ g.BeforeEach(func() {
+ ctx, cancel = context.WithTimeout(context.Background(), 10*time.Minute)
+ oc = e2e.NewOCClient("")
+ })
+
+ g.AfterEach(func() {
+ if cancel != nil {
+ cancel()
+ }
+ })
+
+ // OCP-61481 - Ingress Node Firewall Operator Installation
+ g.It("Author:anusaxen-High-61481-[LEVEL0][OTP]-StagerunBoth-Ingress Node Firewall Operator Installation", func() {
+ g.By("Checking Ingress Node Firewall operator installation")
+
+ // Check that the operator namespace exists
+ output, err := oc.Get(ctx, "namespace", opNamespace, "")
+ o.Expect(err).NotTo(o.HaveOccurred(), "Operator namespace should exist")
+ o.Expect(output).To(o.ContainSubstring(opNamespace), "Namespace output should contain the operator namespace")
+
+ g.By("Verifying CRDs are installed")
+ crdOutput, err := oc.Run(ctx, "get", "crd")
+ o.Expect(err).NotTo(o.HaveOccurred(), "Should be able to list CRDs")
+
+ expectedCRDs := []string{
+ "ingressnodefirewallconfigs.ingressnodefirewall.openshift.io",
+ "ingressnodefirewallnodestates.ingressnodefirewall.openshift.io",
+ "ingressnodefirewalls.ingressnodefirewall.openshift.io",
+ }
+
+ for _, crd := range expectedCRDs {
+ o.Expect(strings.Contains(crdOutput, crd)).To(o.BeTrue(),
+ "CRD %s should be installed", crd)
+ }
+
+ g.By("Verifying operator deployment is running")
+ // Check that the operator deployment exists and is ready
+ deploymentOutput, err := oc.Run(ctx, "get", "deployment", "-n", opNamespace, "-o=jsonpath={.items[*].metadata.name}")
+ o.Expect(err).NotTo(o.HaveOccurred(), "Should be able to list deployments in operator namespace")
+ o.Expect(deploymentOutput).NotTo(o.BeEmpty(), "There should be at least one deployment in the operator namespace")
+
+ // Wait for the operator deployment to be ready
+ deploymentName := "ingress-node-firewall-controller-manager"
+ _, err = oc.Run(ctx, "wait", "deployment/"+deploymentName, "-n", opNamespace, "--for=condition=Available", "--timeout=5m")
+ o.Expect(err).NotTo(o.HaveOccurred(), "Operator deployment should be available")
+
+ g.By("SUCCESS - Ingress Node Firewall operator and CRDs installed")
+ fmt.Println("Operator install and CRDs check successful!")
+ })
+})
diff --git a/test/e2e/util.go b/test/e2e/util.go
new file mode 100644
index 000000000..588d34805
--- /dev/null
+++ b/test/e2e/util.go
@@ -0,0 +1,13 @@
+package e2e
+
+import (
+ "os"
+)
+
+// GetKubeconfig returns the kubeconfig path from KUBECONFIG env var or default location
+func GetKubeconfig() string {
+ if kubeconfig := os.Getenv("KUBECONFIG"); kubeconfig != "" {
+ return kubeconfig
+ }
+ return os.Getenv("HOME") + "/.kube/config"
+}
diff --git a/vendor/cel.dev/expr/.bazelversion b/vendor/cel.dev/expr/.bazelversion
new file mode 100644
index 000000000..26bc914a3
--- /dev/null
+++ b/vendor/cel.dev/expr/.bazelversion
@@ -0,0 +1,2 @@
+7.0.1
+# Keep this pinned version in parity with cel-go
diff --git a/vendor/cel.dev/expr/.gitattributes b/vendor/cel.dev/expr/.gitattributes
new file mode 100644
index 000000000..3de1ec213
--- /dev/null
+++ b/vendor/cel.dev/expr/.gitattributes
@@ -0,0 +1,2 @@
+*.pb.go linguist-generated=true
+*.pb.go -diff -merge
diff --git a/vendor/cel.dev/expr/.gitignore b/vendor/cel.dev/expr/.gitignore
new file mode 100644
index 000000000..0d4fed27c
--- /dev/null
+++ b/vendor/cel.dev/expr/.gitignore
@@ -0,0 +1,2 @@
+bazel-*
+MODULE.bazel.lock
diff --git a/vendor/cel.dev/expr/BUILD.bazel b/vendor/cel.dev/expr/BUILD.bazel
new file mode 100644
index 000000000..37d8adc95
--- /dev/null
+++ b/vendor/cel.dev/expr/BUILD.bazel
@@ -0,0 +1,34 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+package(default_visibility = ["//visibility:public"])
+
+licenses(["notice"]) # Apache 2.0
+
+go_library(
+ name = "expr",
+ srcs = [
+ "checked.pb.go",
+ "eval.pb.go",
+ "explain.pb.go",
+ "syntax.pb.go",
+ "value.pb.go",
+ ],
+ importpath = "cel.dev/expr",
+ visibility = ["//visibility:public"],
+ deps = [
+ "@org_golang_google_genproto_googleapis_rpc//status:go_default_library",
+ "@org_golang_google_protobuf//reflect/protoreflect",
+ "@org_golang_google_protobuf//runtime/protoimpl",
+ "@org_golang_google_protobuf//types/known/anypb",
+ "@org_golang_google_protobuf//types/known/durationpb",
+ "@org_golang_google_protobuf//types/known/emptypb",
+ "@org_golang_google_protobuf//types/known/structpb",
+ "@org_golang_google_protobuf//types/known/timestamppb",
+ ],
+)
+
+alias(
+ name = "go_default_library",
+ actual = ":expr",
+ visibility = ["//visibility:public"],
+)
diff --git a/vendor/cel.dev/expr/CODE_OF_CONDUCT.md b/vendor/cel.dev/expr/CODE_OF_CONDUCT.md
new file mode 100644
index 000000000..59908e2d8
--- /dev/null
+++ b/vendor/cel.dev/expr/CODE_OF_CONDUCT.md
@@ -0,0 +1,25 @@
+# Contributor Code of Conduct
+## Version 0.1.1 (adapted from 0.3b-angular)
+
+As contributors and maintainers of the Common Expression Language
+(CEL) project, we pledge to respect everyone who contributes by
+posting issues, updating documentation, submitting pull requests,
+providing feedback in comments, and any other activities.
+
+Communication through any of CEL's channels (GitHub, Gitter, IRC,
+mailing lists, Google+, Twitter, etc.) must be constructive and never
+resort to personal attacks, trolling, public or private harassment,
+insults, or other unprofessional conduct.
+
+We promise to extend courtesy and respect to everyone involved in this
+project regardless of gender, gender identity, sexual orientation,
+disability, age, race, ethnicity, religion, or level of experience. We
+expect anyone contributing to the project to do the same.
+
+If any member of the community violates this code of conduct, the
+maintainers of the CEL project may take action, removing issues,
+comments, and PRs or blocking accounts as deemed appropriate.
+
+If you are subject to or witness unacceptable behavior, or have any
+other concerns, please email us at
+[cel-conduct@google.com](mailto:cel-conduct@google.com).
diff --git a/vendor/cel.dev/expr/CONTRIBUTING.md b/vendor/cel.dev/expr/CONTRIBUTING.md
new file mode 100644
index 000000000..8f5fd5c31
--- /dev/null
+++ b/vendor/cel.dev/expr/CONTRIBUTING.md
@@ -0,0 +1,32 @@
+# How to Contribute
+
+We'd love to accept your patches and contributions to this project. There are a
+few guidelines you need to follow.
+
+## Contributor License Agreement
+
+Contributions to this project must be accompanied by a Contributor License
+Agreement. You (or your employer) retain the copyright to your contribution,
+this simply gives us permission to use and redistribute your contributions as
+part of the project. Head over to <https://cla.developers.google.com/> to see
+your current agreements on file or to sign a new one.
+
+You generally only need to submit a CLA once, so if you've already submitted one
+(even if it was for a different project), you probably don't need to do it
+again.
+
+## Code reviews
+
+All submissions, including submissions by project members, require review. We
+use GitHub pull requests for this purpose. Consult
+[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more
+information on using pull requests.
+
+## What to expect from maintainers
+
+Expect maintainers to respond to new issues or pull requests within a week.
+For outstanding and ongoing issues and particularly for long-running
+pull requests, expect the maintainers to review within a week of a
+contributor asking for a new review. There is no commitment to resolution --
+merging or closing a pull request, or fixing or closing an issue -- because some
+issues will require more discussion than others.
diff --git a/vendor/cel.dev/expr/GOVERNANCE.md b/vendor/cel.dev/expr/GOVERNANCE.md
new file mode 100644
index 000000000..0a525bc17
--- /dev/null
+++ b/vendor/cel.dev/expr/GOVERNANCE.md
@@ -0,0 +1,43 @@
+# Project Governance
+
+This document defines the governance process for the CEL language. CEL is
+Google-developed, but openly governed. Major contributors to the CEL
+specification and its corresponding implementations constitute the CEL
+Language Council. New members may be added by a unanimous vote of the
+Council.
+
+The MAINTAINERS.md file lists the members of the CEL Language Council, and
+unofficially indicates the "areas of expertise" of each member with respect
+to the publicly available CEL repos.
+
+## Code Changes
+
+Code changes must follow the standard pull request (PR) model documented in the
+CONTRIBUTING.md for each CEL repo. All fixes and features must be reviewed by a
+maintainer. The maintainer reserves the right to request that any feature
+request (FR) or PR be reviewed by the language council.
+
+## Syntax and Semantic Changes
+
+Syntactic and semantic changes must be reviewed by the CEL Language Council.
+Maintainers may also request language council review at their discretion.
+
+The review process is as follows:
+
+- Create a Feature Request in the CEL-Spec repo. The feature description will
+ serve as an abstract for the detailed design document.
+- Co-develop a design document with the Language Council.
+- Once the proposer gives the design document approval, the document will be
+ linked to the FR in the CEL-Spec repo and opened for comments to members of
+ the cel-lang-discuss@googlegroups.com.
+- The Language Council will review the design doc at the next council meeting
+ (once every three weeks) and the council decision included in the document.
+
+If the proposal is approved, the spec will be updated by a maintainer (if
+applicable) and a rationale will be included in the CEL-Spec wiki to ensure
+future developers may follow CEL's growth and direction over time.
+
+Approved proposals may be implemented by the proposer or by the maintainers as
+the parties see fit. At the discretion of the maintainer, changes from the
+approved design are permitted during implementation if they improve the user
+experience and clarity of the feature.
diff --git a/vendor/cel.dev/expr/LICENSE b/vendor/cel.dev/expr/LICENSE
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/cel.dev/expr/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/cel.dev/expr/MAINTAINERS.md b/vendor/cel.dev/expr/MAINTAINERS.md
new file mode 100644
index 000000000..1ed2eb8ab
--- /dev/null
+++ b/vendor/cel.dev/expr/MAINTAINERS.md
@@ -0,0 +1,13 @@
+# CEL Language Council
+
+| Name | Company | Area of Expertise |
+|-----------------|--------------|-------------------|
+| Alfred Fuller | Facebook | cel-cpp, cel-spec |
+| Jim Larson | Google | cel-go, cel-spec |
+| Matthias Blume  | Google       | cel-spec          |
+| Tristan Swadell | Google | cel-go, cel-spec |
+
+## Emeritus
+
+* Sanjay Ghemawat (Google)
+* Wolfgang Grieskamp (Facebook)
diff --git a/vendor/cel.dev/expr/MODULE.bazel b/vendor/cel.dev/expr/MODULE.bazel
new file mode 100644
index 000000000..9794266f5
--- /dev/null
+++ b/vendor/cel.dev/expr/MODULE.bazel
@@ -0,0 +1,70 @@
+module(
+ name = "cel-spec",
+)
+
+bazel_dep(
+ name = "bazel_skylib",
+ version = "1.7.1",
+)
+bazel_dep(
+ name = "gazelle",
+ version = "0.36.0",
+ repo_name = "bazel_gazelle",
+)
+bazel_dep(
+ name = "googleapis",
+ version = "0.0.0-20240819-fe8ba054a",
+ repo_name = "com_google_googleapis",
+)
+bazel_dep(
+ name = "protobuf",
+ version = "26.0",
+ repo_name = "com_google_protobuf",
+)
+bazel_dep(
+ name = "rules_cc",
+ version = "0.0.9",
+)
+bazel_dep(
+ name = "rules_go",
+ version = "0.49.0",
+ repo_name = "io_bazel_rules_go",
+)
+bazel_dep(
+ name = "rules_java",
+ version = "7.6.5",
+)
+bazel_dep(
+ name = "rules_proto",
+ version = "6.0.0",
+)
+bazel_dep(
+ name = "rules_python",
+ version = "0.35.0",
+)
+
+### PYTHON ###
+python = use_extension("@rules_python//python/extensions:python.bzl", "python")
+python.toolchain(
+ ignore_root_user_error = True,
+ python_version = "3.11",
+)
+
+switched_rules = use_extension("@com_google_googleapis//:extensions.bzl", "switched_rules")
+switched_rules.use_languages(
+ cc = True,
+ go = True,
+ java = True,
+)
+use_repo(switched_rules, "com_google_googleapis_imports")
+
+go_sdk = use_extension("@io_bazel_rules_go//go:extensions.bzl", "go_sdk")
+go_sdk.download(version = "1.21.1")
+
+go_deps = use_extension("@bazel_gazelle//:extensions.bzl", "go_deps")
+go_deps.from_file(go_mod = "//:go.mod")
+use_repo(
+ go_deps,
+ "org_golang_google_genproto_googleapis_rpc",
+ "org_golang_google_protobuf",
+)
diff --git a/vendor/cel.dev/expr/README.md b/vendor/cel.dev/expr/README.md
new file mode 100644
index 000000000..7930c0b75
--- /dev/null
+++ b/vendor/cel.dev/expr/README.md
@@ -0,0 +1,73 @@
+# Common Expression Language
+
+The Common Expression Language (CEL) implements common semantics for expression
+evaluation, enabling different applications to more easily interoperate.
+
+Key Applications
+
+* Security policy: organizations have complex infrastructure and need common
+ tooling to reason about the system as a whole
+* Protocols: expressions are a useful data type and require interoperability
+ across programming languages and platforms.
+
+
+Guiding philosophy:
+
+1. Keep it small & fast.
+ * CEL evaluates in linear time, is mutation free, and not Turing-complete.
+ This limitation is a feature of the language design, which allows the
+ implementation to evaluate orders of magnitude faster than equivalently
+ sandboxed JavaScript.
+2. Make it extensible.
+ * CEL is designed to be embedded in applications, and allows for
+ extensibility via its context which allows for functions and data to be
+ provided by the software that embeds it.
+3. Developer-friendly.
+ * The language is approachable to developers. The initial spec was based
+ on the experience of developing Firebase Rules and usability testing
+ many prior iterations.
+   * The library itself and accompanying tooling should be easy to adopt by
+ teams that seek to integrate CEL into their platforms.
+
+The required components of a system that supports CEL are:
+
+* The textual representation of an expression as written by a developer. It is
+ of similar syntax to expressions in C/C++/Java/JavaScript
+* A representation of the program's abstract syntax tree (AST).
+* A compiler library that converts the textual representation to the binary
+ representation. This can be done ahead of time (in the control plane) or
+ just before evaluation (in the data plane).
+* A context containing one or more typed variables, often protobuf messages.
+ Most use-cases will use `attribute_context.proto`
+* An evaluator library that takes the binary format in the context and
+ produces a result, usually a Boolean.
+
+For use cases which require persistence or cross-process communication, it is
+highly recommended to serialize the type-checked expression as a protocol
+buffer. The CEL team will maintain canonical protocol buffers for ASTs and
+will keep these versions identical and wire-compatible in perpetuity:
+
+* [CEL canonical](https://github.com/google/cel-spec/tree/master/proto/cel/expr)
+* [CEL v1alpha1](https://github.com/googleapis/googleapis/tree/master/google/api/expr/v1alpha1)
+
+
+Example of boolean conditions and object construction:
+
+``` c
+// Condition
+account.balance >= transaction.withdrawal
+ || (account.overdraftProtection
+ && account.overdraftLimit >= transaction.withdrawal - account.balance)
+
+// Object construction
+common.GeoPoint{ latitude: 10.0, longitude: -5.5 }
+```
+
+For more detail, see:
+
+* [Introduction](doc/intro.md)
+* [Language Definition](doc/langdef.md)
+
+Released under the [Apache License](LICENSE).
+
+Disclaimer: This is not an official Google product.
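The compile-then-evaluate pipeline this README describes maps onto cel-go, which this change vendors as an indirect dependency; a minimal sketch, assuming the standard cel-go API:

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
)

func main() {
	// Declare one typed variable, compile, then evaluate -- the
	// compiler/evaluator split the README calls out.
	env, err := cel.NewEnv(cel.Variable("balance", cel.IntType))
	if err != nil {
		panic(err)
	}
	ast, iss := env.Compile(`balance >= 100`)
	if iss != nil && iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}
	out, _, err := prg.Eval(map[string]any{"balance": 250})
	fmt.Println(out, err) // true <nil>
}
```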
diff --git a/vendor/cel.dev/expr/WORKSPACE b/vendor/cel.dev/expr/WORKSPACE
new file mode 100644
index 000000000..b6dc9ed67
--- /dev/null
+++ b/vendor/cel.dev/expr/WORKSPACE
@@ -0,0 +1,145 @@
+load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
+
+http_archive(
+ name = "io_bazel_rules_go",
+ sha256 = "099a9fb96a376ccbbb7d291ed4ecbdfd42f6bc822ab77ae6f1b5cb9e914e94fa",
+ urls = [
+ "https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.35.0/rules_go-v0.35.0.zip",
+ "https://github.com/bazelbuild/rules_go/releases/download/v0.35.0/rules_go-v0.35.0.zip",
+ ],
+)
+
+http_archive(
+ name = "bazel_gazelle",
+ sha256 = "ecba0f04f96b4960a5b250c8e8eeec42281035970aa8852dda73098274d14a1d",
+ urls = [
+ "https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.29.0/bazel-gazelle-v0.29.0.tar.gz",
+ "https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.29.0/bazel-gazelle-v0.29.0.tar.gz",
+ ],
+)
+
+http_archive(
+ name = "rules_proto",
+ sha256 = "e017528fd1c91c5a33f15493e3a398181a9e821a804eb7ff5acdd1d2d6c2b18d",
+ strip_prefix = "rules_proto-4.0.0-3.20.0",
+ urls = [
+ "https://github.com/bazelbuild/rules_proto/archive/refs/tags/4.0.0-3.20.0.tar.gz",
+ ],
+)
+
+# googleapis as of 09/16/2024
+http_archive(
+ name = "com_google_googleapis",
+ strip_prefix = "googleapis-4082d5e51e8481f6ccc384cacd896f4e78f19dee",
+ sha256 = "57319889d47578b3c89bf1b3f34888d796a8913d63b32d750a4cd12ed303c4e8",
+ urls = [
+ "https://github.com/googleapis/googleapis/archive/4082d5e51e8481f6ccc384cacd896f4e78f19dee.tar.gz",
+ ],
+)
+
+# protobuf
+http_archive(
+ name = "com_google_protobuf",
+ sha256 = "8242327e5df8c80ba49e4165250b8f79a76bd11765facefaaecfca7747dc8da2",
+ strip_prefix = "protobuf-3.21.5",
+ urls = ["https://github.com/protocolbuffers/protobuf/archive/v3.21.5.zip"],
+)
+
+# googletest
+http_archive(
+ name = "com_google_googletest",
+ urls = ["https://github.com/google/googletest/archive/master.zip"],
+ strip_prefix = "googletest-master",
+)
+
+# gflags
+http_archive(
+ name = "com_github_gflags_gflags",
+ sha256 = "6e16c8bc91b1310a44f3965e616383dbda48f83e8c1eaa2370a215057b00cabe",
+ strip_prefix = "gflags-77592648e3f3be87d6c7123eb81cbad75f9aef5a",
+ urls = [
+ "https://mirror.bazel.build/github.com/gflags/gflags/archive/77592648e3f3be87d6c7123eb81cbad75f9aef5a.tar.gz",
+ "https://github.com/gflags/gflags/archive/77592648e3f3be87d6c7123eb81cbad75f9aef5a.tar.gz",
+ ],
+)
+
+# glog
+http_archive(
+ name = "com_google_glog",
+ sha256 = "1ee310e5d0a19b9d584a855000434bb724aa744745d5b8ab1855c85bff8a8e21",
+ strip_prefix = "glog-028d37889a1e80e8a07da1b8945ac706259e5fd8",
+ urls = [
+ "https://mirror.bazel.build/github.com/google/glog/archive/028d37889a1e80e8a07da1b8945ac706259e5fd8.tar.gz",
+ "https://github.com/google/glog/archive/028d37889a1e80e8a07da1b8945ac706259e5fd8.tar.gz",
+ ],
+)
+
+# absl
+http_archive(
+ name = "com_google_absl",
+ strip_prefix = "abseil-cpp-master",
+ urls = ["https://github.com/abseil/abseil-cpp/archive/master.zip"],
+)
+
+load("@io_bazel_rules_go//go:deps.bzl", "go_rules_dependencies", "go_register_toolchains")
+load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies", "go_repository")
+load("@com_google_googleapis//:repository_rules.bzl", "switched_rules_by_language")
+load("@rules_proto//proto:repositories.bzl", "rules_proto_dependencies", "rules_proto_toolchains")
+load("@com_google_protobuf//:protobuf_deps.bzl", "protobuf_deps")
+
+switched_rules_by_language(
+ name = "com_google_googleapis_imports",
+ cc = True,
+)
+
+# Do *not* call *_dependencies(), etc, yet. See comment at the end.
+
+# Generated Google APIs protos for Golang
+# Generated Google APIs protos for Golang 08/26/2024
+go_repository(
+ name = "org_golang_google_genproto_googleapis_api",
+ build_file_proto_mode = "disable_global",
+ importpath = "google.golang.org/genproto/googleapis/api",
+ sum = "h1:YcyjlL1PRr2Q17/I0dPk2JmYS5CDXfcdb2Z3YRioEbw=",
+ version = "v0.0.0-20240826202546-f6391c0de4c7",
+)
+
+# Generated Google APIs protos for Golang 08/26/2024
+go_repository(
+ name = "org_golang_google_genproto_googleapis_rpc",
+ build_file_proto_mode = "disable_global",
+ importpath = "google.golang.org/genproto/googleapis/rpc",
+ sum = "h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs=",
+ version = "v0.0.0-20240826202546-f6391c0de4c7",
+)
+
+# gRPC deps
+go_repository(
+ name = "org_golang_google_grpc",
+ build_file_proto_mode = "disable_global",
+ importpath = "google.golang.org/grpc",
+ tag = "v1.49.0",
+)
+
+go_repository(
+ name = "org_golang_x_net",
+ importpath = "golang.org/x/net",
+ sum = "h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628=",
+ version = "v0.0.0-20190311183353-d8887717615a",
+)
+
+go_repository(
+ name = "org_golang_x_text",
+ importpath = "golang.org/x/text",
+ sum = "h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=",
+ version = "v0.3.2",
+)
+
+# Run the dependencies at the end. These will silently try to import some
+# of the above repositories but at different versions, so ours must come first.
+go_rules_dependencies()
+go_register_toolchains(version = "1.19.1")
+gazelle_dependencies()
+rules_proto_dependencies()
+rules_proto_toolchains()
+protobuf_deps()
diff --git a/vendor/cel.dev/expr/WORKSPACE.bzlmod b/vendor/cel.dev/expr/WORKSPACE.bzlmod
new file mode 100644
index 000000000..e69de29bb
diff --git a/vendor/cel.dev/expr/checked.pb.go b/vendor/cel.dev/expr/checked.pb.go
new file mode 100644
index 000000000..bb225c8ab
--- /dev/null
+++ b/vendor/cel.dev/expr/checked.pb.go
@@ -0,0 +1,1432 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.28.1
+// protoc v3.21.5
+// source: cel/expr/checked.proto
+
+package expr
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type Type_PrimitiveType int32
+
+const (
+ Type_PRIMITIVE_TYPE_UNSPECIFIED Type_PrimitiveType = 0
+ Type_BOOL Type_PrimitiveType = 1
+ Type_INT64 Type_PrimitiveType = 2
+ Type_UINT64 Type_PrimitiveType = 3
+ Type_DOUBLE Type_PrimitiveType = 4
+ Type_STRING Type_PrimitiveType = 5
+ Type_BYTES Type_PrimitiveType = 6
+)
+
+// Enum value maps for Type_PrimitiveType.
+var (
+ Type_PrimitiveType_name = map[int32]string{
+ 0: "PRIMITIVE_TYPE_UNSPECIFIED",
+ 1: "BOOL",
+ 2: "INT64",
+ 3: "UINT64",
+ 4: "DOUBLE",
+ 5: "STRING",
+ 6: "BYTES",
+ }
+ Type_PrimitiveType_value = map[string]int32{
+ "PRIMITIVE_TYPE_UNSPECIFIED": 0,
+ "BOOL": 1,
+ "INT64": 2,
+ "UINT64": 3,
+ "DOUBLE": 4,
+ "STRING": 5,
+ "BYTES": 6,
+ }
+)
+
+func (x Type_PrimitiveType) Enum() *Type_PrimitiveType {
+ p := new(Type_PrimitiveType)
+ *p = x
+ return p
+}
+
+func (x Type_PrimitiveType) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Type_PrimitiveType) Descriptor() protoreflect.EnumDescriptor {
+ return file_cel_expr_checked_proto_enumTypes[0].Descriptor()
+}
+
+func (Type_PrimitiveType) Type() protoreflect.EnumType {
+ return &file_cel_expr_checked_proto_enumTypes[0]
+}
+
+func (x Type_PrimitiveType) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Type_PrimitiveType.Descriptor instead.
+func (Type_PrimitiveType) EnumDescriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 0}
+}
+
+type Type_WellKnownType int32
+
+const (
+ Type_WELL_KNOWN_TYPE_UNSPECIFIED Type_WellKnownType = 0
+ Type_ANY Type_WellKnownType = 1
+ Type_TIMESTAMP Type_WellKnownType = 2
+ Type_DURATION Type_WellKnownType = 3
+)
+
+// Enum value maps for Type_WellKnownType.
+var (
+ Type_WellKnownType_name = map[int32]string{
+ 0: "WELL_KNOWN_TYPE_UNSPECIFIED",
+ 1: "ANY",
+ 2: "TIMESTAMP",
+ 3: "DURATION",
+ }
+ Type_WellKnownType_value = map[string]int32{
+ "WELL_KNOWN_TYPE_UNSPECIFIED": 0,
+ "ANY": 1,
+ "TIMESTAMP": 2,
+ "DURATION": 3,
+ }
+)
+
+func (x Type_WellKnownType) Enum() *Type_WellKnownType {
+ p := new(Type_WellKnownType)
+ *p = x
+ return p
+}
+
+func (x Type_WellKnownType) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Type_WellKnownType) Descriptor() protoreflect.EnumDescriptor {
+ return file_cel_expr_checked_proto_enumTypes[1].Descriptor()
+}
+
+func (Type_WellKnownType) Type() protoreflect.EnumType {
+ return &file_cel_expr_checked_proto_enumTypes[1]
+}
+
+func (x Type_WellKnownType) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Type_WellKnownType.Descriptor instead.
+func (Type_WellKnownType) EnumDescriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 1}
+}
+
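+// CheckedExpr mirrors the cel.expr.CheckedExpr message: a parsed expression
+// together with its type-check results. ReferenceMap and TypeMap are keyed
+// by expression id, resolving each sub-expression to its declaration and type.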
+type CheckedExpr struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ ReferenceMap map[int64]*Reference `protobuf:"bytes,2,rep,name=reference_map,json=referenceMap,proto3" json:"reference_map,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ TypeMap map[int64]*Type `protobuf:"bytes,3,rep,name=type_map,json=typeMap,proto3" json:"type_map,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ SourceInfo *SourceInfo `protobuf:"bytes,5,opt,name=source_info,json=sourceInfo,proto3" json:"source_info,omitempty"`
+ ExprVersion string `protobuf:"bytes,6,opt,name=expr_version,json=exprVersion,proto3" json:"expr_version,omitempty"`
+ Expr *Expr `protobuf:"bytes,4,opt,name=expr,proto3" json:"expr,omitempty"`
+}
+
+func (x *CheckedExpr) Reset() {
+ *x = CheckedExpr{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CheckedExpr) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CheckedExpr) ProtoMessage() {}
+
+func (x *CheckedExpr) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CheckedExpr.ProtoReflect.Descriptor instead.
+func (*CheckedExpr) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *CheckedExpr) GetReferenceMap() map[int64]*Reference {
+ if x != nil {
+ return x.ReferenceMap
+ }
+ return nil
+}
+
+func (x *CheckedExpr) GetTypeMap() map[int64]*Type {
+ if x != nil {
+ return x.TypeMap
+ }
+ return nil
+}
+
+func (x *CheckedExpr) GetSourceInfo() *SourceInfo {
+ if x != nil {
+ return x.SourceInfo
+ }
+ return nil
+}
+
+func (x *CheckedExpr) GetExprVersion() string {
+ if x != nil {
+ return x.ExprVersion
+ }
+ return ""
+}
+
+func (x *CheckedExpr) GetExpr() *Expr {
+ if x != nil {
+ return x.Expr
+ }
+ return nil
+}
+
+type Type struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to TypeKind:
+ //
+ // *Type_Dyn
+ // *Type_Null
+ // *Type_Primitive
+ // *Type_Wrapper
+ // *Type_WellKnown
+ // *Type_ListType_
+ // *Type_MapType_
+ // *Type_Function
+ // *Type_MessageType
+ // *Type_TypeParam
+ // *Type_Type
+ // *Type_Error
+ // *Type_AbstractType_
+ TypeKind isType_TypeKind `protobuf_oneof:"type_kind"`
+}
+
+func (x *Type) Reset() {
+ *x = Type{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Type) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Type) ProtoMessage() {}
+
+func (x *Type) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Type.ProtoReflect.Descriptor instead.
+func (*Type) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{1}
+}
+
+func (m *Type) GetTypeKind() isType_TypeKind {
+ if m != nil {
+ return m.TypeKind
+ }
+ return nil
+}
+
+func (x *Type) GetDyn() *emptypb.Empty {
+ if x, ok := x.GetTypeKind().(*Type_Dyn); ok {
+ return x.Dyn
+ }
+ return nil
+}
+
+func (x *Type) GetNull() structpb.NullValue {
+ if x, ok := x.GetTypeKind().(*Type_Null); ok {
+ return x.Null
+ }
+ return structpb.NullValue(0)
+}
+
+func (x *Type) GetPrimitive() Type_PrimitiveType {
+ if x, ok := x.GetTypeKind().(*Type_Primitive); ok {
+ return x.Primitive
+ }
+ return Type_PRIMITIVE_TYPE_UNSPECIFIED
+}
+
+func (x *Type) GetWrapper() Type_PrimitiveType {
+ if x, ok := x.GetTypeKind().(*Type_Wrapper); ok {
+ return x.Wrapper
+ }
+ return Type_PRIMITIVE_TYPE_UNSPECIFIED
+}
+
+func (x *Type) GetWellKnown() Type_WellKnownType {
+ if x, ok := x.GetTypeKind().(*Type_WellKnown); ok {
+ return x.WellKnown
+ }
+ return Type_WELL_KNOWN_TYPE_UNSPECIFIED
+}
+
+func (x *Type) GetListType() *Type_ListType {
+ if x, ok := x.GetTypeKind().(*Type_ListType_); ok {
+ return x.ListType
+ }
+ return nil
+}
+
+func (x *Type) GetMapType() *Type_MapType {
+ if x, ok := x.GetTypeKind().(*Type_MapType_); ok {
+ return x.MapType
+ }
+ return nil
+}
+
+func (x *Type) GetFunction() *Type_FunctionType {
+ if x, ok := x.GetTypeKind().(*Type_Function); ok {
+ return x.Function
+ }
+ return nil
+}
+
+func (x *Type) GetMessageType() string {
+ if x, ok := x.GetTypeKind().(*Type_MessageType); ok {
+ return x.MessageType
+ }
+ return ""
+}
+
+func (x *Type) GetTypeParam() string {
+ if x, ok := x.GetTypeKind().(*Type_TypeParam); ok {
+ return x.TypeParam
+ }
+ return ""
+}
+
+func (x *Type) GetType() *Type {
+ if x, ok := x.GetTypeKind().(*Type_Type); ok {
+ return x.Type
+ }
+ return nil
+}
+
+func (x *Type) GetError() *emptypb.Empty {
+ if x, ok := x.GetTypeKind().(*Type_Error); ok {
+ return x.Error
+ }
+ return nil
+}
+
+func (x *Type) GetAbstractType() *Type_AbstractType {
+ if x, ok := x.GetTypeKind().(*Type_AbstractType_); ok {
+ return x.AbstractType
+ }
+ return nil
+}
+
+type isType_TypeKind interface {
+ isType_TypeKind()
+}
+
+type Type_Dyn struct {
+ Dyn *emptypb.Empty `protobuf:"bytes,1,opt,name=dyn,proto3,oneof"`
+}
+
+type Type_Null struct {
+ Null structpb.NullValue `protobuf:"varint,2,opt,name=null,proto3,enum=google.protobuf.NullValue,oneof"`
+}
+
+type Type_Primitive struct {
+ Primitive Type_PrimitiveType `protobuf:"varint,3,opt,name=primitive,proto3,enum=cel.expr.Type_PrimitiveType,oneof"`
+}
+
+type Type_Wrapper struct {
+ Wrapper Type_PrimitiveType `protobuf:"varint,4,opt,name=wrapper,proto3,enum=cel.expr.Type_PrimitiveType,oneof"`
+}
+
+type Type_WellKnown struct {
+ WellKnown Type_WellKnownType `protobuf:"varint,5,opt,name=well_known,json=wellKnown,proto3,enum=cel.expr.Type_WellKnownType,oneof"`
+}
+
+type Type_ListType_ struct {
+ ListType *Type_ListType `protobuf:"bytes,6,opt,name=list_type,json=listType,proto3,oneof"`
+}
+
+type Type_MapType_ struct {
+ MapType *Type_MapType `protobuf:"bytes,7,opt,name=map_type,json=mapType,proto3,oneof"`
+}
+
+type Type_Function struct {
+ Function *Type_FunctionType `protobuf:"bytes,8,opt,name=function,proto3,oneof"`
+}
+
+type Type_MessageType struct {
+ MessageType string `protobuf:"bytes,9,opt,name=message_type,json=messageType,proto3,oneof"`
+}
+
+type Type_TypeParam struct {
+ TypeParam string `protobuf:"bytes,10,opt,name=type_param,json=typeParam,proto3,oneof"`
+}
+
+type Type_Type struct {
+ Type *Type `protobuf:"bytes,11,opt,name=type,proto3,oneof"`
+}
+
+type Type_Error struct {
+ Error *emptypb.Empty `protobuf:"bytes,12,opt,name=error,proto3,oneof"`
+}
+
+type Type_AbstractType_ struct {
+ AbstractType *Type_AbstractType `protobuf:"bytes,14,opt,name=abstract_type,json=abstractType,proto3,oneof"`
+}
+
+func (*Type_Dyn) isType_TypeKind() {}
+
+func (*Type_Null) isType_TypeKind() {}
+
+func (*Type_Primitive) isType_TypeKind() {}
+
+func (*Type_Wrapper) isType_TypeKind() {}
+
+func (*Type_WellKnown) isType_TypeKind() {}
+
+func (*Type_ListType_) isType_TypeKind() {}
+
+func (*Type_MapType_) isType_TypeKind() {}
+
+func (*Type_Function) isType_TypeKind() {}
+
+func (*Type_MessageType) isType_TypeKind() {}
+
+func (*Type_TypeParam) isType_TypeKind() {}
+
+func (*Type_Type) isType_TypeKind() {}
+
+func (*Type_Error) isType_TypeKind() {}
+
+func (*Type_AbstractType_) isType_TypeKind() {}
+
+type Decl struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // Types that are assignable to DeclKind:
+ //
+ // *Decl_Ident
+ // *Decl_Function
+ DeclKind isDecl_DeclKind `protobuf_oneof:"decl_kind"`
+}
+
+func (x *Decl) Reset() {
+ *x = Decl{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Decl) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Decl) ProtoMessage() {}
+
+func (x *Decl) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Decl.ProtoReflect.Descriptor instead.
+func (*Decl) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *Decl) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (m *Decl) GetDeclKind() isDecl_DeclKind {
+ if m != nil {
+ return m.DeclKind
+ }
+ return nil
+}
+
+func (x *Decl) GetIdent() *Decl_IdentDecl {
+ if x, ok := x.GetDeclKind().(*Decl_Ident); ok {
+ return x.Ident
+ }
+ return nil
+}
+
+func (x *Decl) GetFunction() *Decl_FunctionDecl {
+ if x, ok := x.GetDeclKind().(*Decl_Function); ok {
+ return x.Function
+ }
+ return nil
+}
+
+type isDecl_DeclKind interface {
+ isDecl_DeclKind()
+}
+
+type Decl_Ident struct {
+ Ident *Decl_IdentDecl `protobuf:"bytes,2,opt,name=ident,proto3,oneof"`
+}
+
+type Decl_Function struct {
+ Function *Decl_FunctionDecl `protobuf:"bytes,3,opt,name=function,proto3,oneof"`
+}
+
+func (*Decl_Ident) isDecl_DeclKind() {}
+
+func (*Decl_Function) isDecl_DeclKind() {}
+
+type Reference struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ OverloadId []string `protobuf:"bytes,3,rep,name=overload_id,json=overloadId,proto3" json:"overload_id,omitempty"`
+ Value *Constant `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *Reference) Reset() {
+ *x = Reference{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Reference) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Reference) ProtoMessage() {}
+
+func (x *Reference) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Reference.ProtoReflect.Descriptor instead.
+func (*Reference) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *Reference) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *Reference) GetOverloadId() []string {
+ if x != nil {
+ return x.OverloadId
+ }
+ return nil
+}
+
+func (x *Reference) GetValue() *Constant {
+ if x != nil {
+ return x.Value
+ }
+ return nil
+}
+
+type Type_ListType struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ ElemType *Type `protobuf:"bytes,1,opt,name=elem_type,json=elemType,proto3" json:"elem_type,omitempty"`
+}
+
+func (x *Type_ListType) Reset() {
+ *x = Type_ListType{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Type_ListType) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Type_ListType) ProtoMessage() {}
+
+func (x *Type_ListType) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Type_ListType.ProtoReflect.Descriptor instead.
+func (*Type_ListType) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 0}
+}
+
+func (x *Type_ListType) GetElemType() *Type {
+ if x != nil {
+ return x.ElemType
+ }
+ return nil
+}
+
+type Type_MapType struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ KeyType *Type `protobuf:"bytes,1,opt,name=key_type,json=keyType,proto3" json:"key_type,omitempty"`
+ ValueType *Type `protobuf:"bytes,2,opt,name=value_type,json=valueType,proto3" json:"value_type,omitempty"`
+}
+
+func (x *Type_MapType) Reset() {
+ *x = Type_MapType{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Type_MapType) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Type_MapType) ProtoMessage() {}
+
+func (x *Type_MapType) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Type_MapType.ProtoReflect.Descriptor instead.
+func (*Type_MapType) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 1}
+}
+
+func (x *Type_MapType) GetKeyType() *Type {
+ if x != nil {
+ return x.KeyType
+ }
+ return nil
+}
+
+func (x *Type_MapType) GetValueType() *Type {
+ if x != nil {
+ return x.ValueType
+ }
+ return nil
+}
+
+type Type_FunctionType struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ ResultType *Type `protobuf:"bytes,1,opt,name=result_type,json=resultType,proto3" json:"result_type,omitempty"`
+ ArgTypes []*Type `protobuf:"bytes,2,rep,name=arg_types,json=argTypes,proto3" json:"arg_types,omitempty"`
+}
+
+func (x *Type_FunctionType) Reset() {
+ *x = Type_FunctionType{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Type_FunctionType) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Type_FunctionType) ProtoMessage() {}
+
+func (x *Type_FunctionType) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Type_FunctionType.ProtoReflect.Descriptor instead.
+func (*Type_FunctionType) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 2}
+}
+
+func (x *Type_FunctionType) GetResultType() *Type {
+ if x != nil {
+ return x.ResultType
+ }
+ return nil
+}
+
+func (x *Type_FunctionType) GetArgTypes() []*Type {
+ if x != nil {
+ return x.ArgTypes
+ }
+ return nil
+}
+
+type Type_AbstractType struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ ParameterTypes []*Type `protobuf:"bytes,2,rep,name=parameter_types,json=parameterTypes,proto3" json:"parameter_types,omitempty"`
+}
+
+func (x *Type_AbstractType) Reset() {
+ *x = Type_AbstractType{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Type_AbstractType) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Type_AbstractType) ProtoMessage() {}
+
+func (x *Type_AbstractType) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Type_AbstractType.ProtoReflect.Descriptor instead.
+func (*Type_AbstractType) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 3}
+}
+
+func (x *Type_AbstractType) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *Type_AbstractType) GetParameterTypes() []*Type {
+ if x != nil {
+ return x.ParameterTypes
+ }
+ return nil
+}
+
+type Decl_IdentDecl struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Type *Type `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+ Value *Constant `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+ Doc string `protobuf:"bytes,3,opt,name=doc,proto3" json:"doc,omitempty"`
+}
+
+func (x *Decl_IdentDecl) Reset() {
+ *x = Decl_IdentDecl{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Decl_IdentDecl) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Decl_IdentDecl) ProtoMessage() {}
+
+func (x *Decl_IdentDecl) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Decl_IdentDecl.ProtoReflect.Descriptor instead.
+func (*Decl_IdentDecl) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{2, 0}
+}
+
+func (x *Decl_IdentDecl) GetType() *Type {
+ if x != nil {
+ return x.Type
+ }
+ return nil
+}
+
+func (x *Decl_IdentDecl) GetValue() *Constant {
+ if x != nil {
+ return x.Value
+ }
+ return nil
+}
+
+func (x *Decl_IdentDecl) GetDoc() string {
+ if x != nil {
+ return x.Doc
+ }
+ return ""
+}
+
+type Decl_FunctionDecl struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Overloads []*Decl_FunctionDecl_Overload `protobuf:"bytes,1,rep,name=overloads,proto3" json:"overloads,omitempty"`
+}
+
+func (x *Decl_FunctionDecl) Reset() {
+ *x = Decl_FunctionDecl{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Decl_FunctionDecl) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Decl_FunctionDecl) ProtoMessage() {}
+
+func (x *Decl_FunctionDecl) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Decl_FunctionDecl.ProtoReflect.Descriptor instead.
+func (*Decl_FunctionDecl) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{2, 1}
+}
+
+func (x *Decl_FunctionDecl) GetOverloads() []*Decl_FunctionDecl_Overload {
+ if x != nil {
+ return x.Overloads
+ }
+ return nil
+}
+
+type Decl_FunctionDecl_Overload struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ OverloadId string `protobuf:"bytes,1,opt,name=overload_id,json=overloadId,proto3" json:"overload_id,omitempty"`
+ Params []*Type `protobuf:"bytes,2,rep,name=params,proto3" json:"params,omitempty"`
+ TypeParams []string `protobuf:"bytes,3,rep,name=type_params,json=typeParams,proto3" json:"type_params,omitempty"`
+ ResultType *Type `protobuf:"bytes,4,opt,name=result_type,json=resultType,proto3" json:"result_type,omitempty"`
+ IsInstanceFunction bool `protobuf:"varint,5,opt,name=is_instance_function,json=isInstanceFunction,proto3" json:"is_instance_function,omitempty"`
+ Doc string `protobuf:"bytes,6,opt,name=doc,proto3" json:"doc,omitempty"`
+}
+
+func (x *Decl_FunctionDecl_Overload) Reset() {
+ *x = Decl_FunctionDecl_Overload{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Decl_FunctionDecl_Overload) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Decl_FunctionDecl_Overload) ProtoMessage() {}
+
+func (x *Decl_FunctionDecl_Overload) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[12]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Decl_FunctionDecl_Overload.ProtoReflect.Descriptor instead.
+func (*Decl_FunctionDecl_Overload) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{2, 1, 0}
+}
+
+func (x *Decl_FunctionDecl_Overload) GetOverloadId() string {
+ if x != nil {
+ return x.OverloadId
+ }
+ return ""
+}
+
+func (x *Decl_FunctionDecl_Overload) GetParams() []*Type {
+ if x != nil {
+ return x.Params
+ }
+ return nil
+}
+
+func (x *Decl_FunctionDecl_Overload) GetTypeParams() []string {
+ if x != nil {
+ return x.TypeParams
+ }
+ return nil
+}
+
+func (x *Decl_FunctionDecl_Overload) GetResultType() *Type {
+ if x != nil {
+ return x.ResultType
+ }
+ return nil
+}
+
+func (x *Decl_FunctionDecl_Overload) GetIsInstanceFunction() bool {
+ if x != nil {
+ return x.IsInstanceFunction
+ }
+ return false
+}
+
+func (x *Decl_FunctionDecl_Overload) GetDoc() string {
+ if x != nil {
+ return x.Doc
+ }
+ return ""
+}
+
+var File_cel_expr_checked_proto protoreflect.FileDescriptor
+
+var file_cel_expr_checked_proto_rawDesc = []byte{
+ 0x0a, 0x16, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x63, 0x68, 0x65, 0x63, 0x6b,
+ 0x65, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78,
+ 0x70, 0x72, 0x1a, 0x15, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x73, 0x79, 0x6e,
+ 0x74, 0x61, 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xba, 0x03, 0x0a, 0x0b, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64,
+ 0x45, 0x78, 0x70, 0x72, 0x12, 0x4c, 0x0a, 0x0d, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63,
+ 0x65, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x63, 0x65,
+ 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x45, 0x78,
+ 0x70, 0x72, 0x2e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4d, 0x61, 0x70, 0x45,
+ 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4d,
+ 0x61, 0x70, 0x12, 0x3d, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x03,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+ 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65,
+ 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x4d, 0x61,
+ 0x70, 0x12, 0x35, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f,
+ 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70,
+ 0x72, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0a, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x21, 0x0a, 0x0c, 0x65, 0x78, 0x70, 0x72,
+ 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b,
+ 0x65, 0x78, 0x70, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x04, 0x65,
+ 0x78, 0x70, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e,
+ 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x04, 0x65, 0x78, 0x70, 0x72, 0x1a,
+ 0x54, 0x0a, 0x11, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4d, 0x61, 0x70, 0x45,
+ 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72,
+ 0x2e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x4a, 0x0a, 0x0c, 0x54, 0x79, 0x70, 0x65, 0x4d, 0x61, 0x70,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70,
+ 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
+ 0x01, 0x22, 0xe6, 0x09, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2a, 0x0a, 0x03, 0x64, 0x79,
+ 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x48,
+ 0x00, 0x52, 0x03, 0x64, 0x79, 0x6e, 0x12, 0x30, 0x0a, 0x04, 0x6e, 0x75, 0x6c, 0x6c, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65,
+ 0x48, 0x00, 0x52, 0x04, 0x6e, 0x75, 0x6c, 0x6c, 0x12, 0x3c, 0x0a, 0x09, 0x70, 0x72, 0x69, 0x6d,
+ 0x69, 0x74, 0x69, 0x76, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x63, 0x65,
+ 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x50, 0x72, 0x69, 0x6d,
+ 0x69, 0x74, 0x69, 0x76, 0x65, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x09, 0x70, 0x72, 0x69,
+ 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65,
+ 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78,
+ 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76,
+ 0x65, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x07, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72,
+ 0x12, 0x3d, 0x0a, 0x0a, 0x77, 0x65, 0x6c, 0x6c, 0x5f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x18, 0x05,
+ 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+ 0x54, 0x79, 0x70, 0x65, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79,
+ 0x70, 0x65, 0x48, 0x00, 0x52, 0x09, 0x77, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x12,
+ 0x36, 0x0a, 0x09, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79,
+ 0x70, 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x08, 0x6c,
+ 0x69, 0x73, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x33, 0x0a, 0x08, 0x6d, 0x61, 0x70, 0x5f, 0x74,
+ 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x65, 0x6c, 0x2e,
+ 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x4d, 0x61, 0x70, 0x54, 0x79, 0x70,
+ 0x65, 0x48, 0x00, 0x52, 0x07, 0x6d, 0x61, 0x70, 0x54, 0x79, 0x70, 0x65, 0x12, 0x39, 0x0a, 0x08,
+ 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b,
+ 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x46,
+ 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x08, 0x66,
+ 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61,
+ 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52,
+ 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0a,
+ 0x74, 0x79, 0x70, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09,
+ 0x48, 0x00, 0x52, 0x09, 0x74, 0x79, 0x70, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x12, 0x24, 0x0a,
+ 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65,
+ 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x04, 0x74,
+ 0x79, 0x70, 0x65, 0x12, 0x2e, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x0c, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x48, 0x00, 0x52, 0x05, 0x65, 0x72,
+ 0x72, 0x6f, 0x72, 0x12, 0x42, 0x0a, 0x0d, 0x61, 0x62, 0x73, 0x74, 0x72, 0x61, 0x63, 0x74, 0x5f,
+ 0x74, 0x79, 0x70, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x65, 0x6c,
+ 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x41, 0x62, 0x73, 0x74, 0x72,
+ 0x61, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x0c, 0x61, 0x62, 0x73, 0x74, 0x72,
+ 0x61, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x1a, 0x37, 0x0a, 0x08, 0x4c, 0x69, 0x73, 0x74, 0x54,
+ 0x79, 0x70, 0x65, 0x12, 0x2b, 0x0a, 0x09, 0x65, 0x6c, 0x65, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70,
+ 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x65, 0x6c, 0x65, 0x6d, 0x54, 0x79, 0x70, 0x65,
+ 0x1a, 0x63, 0x0a, 0x07, 0x4d, 0x61, 0x70, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x08, 0x6b,
+ 0x65, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e,
+ 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, 0x6b,
+ 0x65, 0x79, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2d, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f,
+ 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c,
+ 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x54, 0x79, 0x70, 0x65, 0x1a, 0x6c, 0x0a, 0x0c, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f,
+ 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2f, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x5f,
+ 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c,
+ 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x75,
+ 0x6c, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2b, 0x0a, 0x09, 0x61, 0x72, 0x67, 0x5f, 0x74, 0x79,
+ 0x70, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e,
+ 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x61, 0x72, 0x67, 0x54, 0x79,
+ 0x70, 0x65, 0x73, 0x1a, 0x5b, 0x0a, 0x0c, 0x41, 0x62, 0x73, 0x74, 0x72, 0x61, 0x63, 0x74, 0x54,
+ 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x0f, 0x70, 0x61, 0x72, 0x61, 0x6d,
+ 0x65, 0x74, 0x65, 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65,
+ 0x52, 0x0e, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x73,
+ 0x22, 0x73, 0x0a, 0x0d, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x54, 0x79, 0x70,
+ 0x65, 0x12, 0x1e, 0x0a, 0x1a, 0x50, 0x52, 0x49, 0x4d, 0x49, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x54,
+ 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10,
+ 0x00, 0x12, 0x08, 0x0a, 0x04, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x49,
+ 0x4e, 0x54, 0x36, 0x34, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34,
+ 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x04, 0x12, 0x0a,
+ 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, 0x09, 0x0a, 0x05, 0x42, 0x59,
+ 0x54, 0x45, 0x53, 0x10, 0x06, 0x22, 0x56, 0x0a, 0x0d, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f,
+ 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x1b, 0x57, 0x45, 0x4c, 0x4c, 0x5f, 0x4b,
+ 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43,
+ 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4e, 0x59, 0x10, 0x01,
+ 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x49, 0x4d, 0x45, 0x53, 0x54, 0x41, 0x4d, 0x50, 0x10, 0x02, 0x12,
+ 0x0c, 0x0a, 0x08, 0x44, 0x55, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x03, 0x42, 0x0b, 0x0a,
+ 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0xc2, 0x04, 0x0a, 0x04, 0x44,
+ 0x65, 0x63, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x30, 0x0a, 0x05, 0x69, 0x64, 0x65, 0x6e, 0x74,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70,
+ 0x72, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x44, 0x65, 0x63, 0x6c,
+ 0x48, 0x00, 0x52, 0x05, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x12, 0x39, 0x0a, 0x08, 0x66, 0x75, 0x6e,
+ 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x65,
+ 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x2e, 0x46, 0x75, 0x6e, 0x63,
+ 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x63, 0x6c, 0x48, 0x00, 0x52, 0x08, 0x66, 0x75, 0x6e, 0x63,
+ 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x6b, 0x0a, 0x09, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x44, 0x65, 0x63,
+ 0x6c, 0x12, 0x22, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52,
+ 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x28, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+ 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12,
+ 0x10, 0x0a, 0x03, 0x64, 0x6f, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x64, 0x6f,
+ 0x63, 0x1a, 0xbe, 0x02, 0x0a, 0x0c, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65,
+ 0x63, 0x6c, 0x12, 0x42, 0x0a, 0x09, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x18,
+ 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72,
+ 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65,
+ 0x63, 0x6c, 0x2e, 0x4f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x09, 0x6f, 0x76, 0x65,
+ 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x1a, 0xe9, 0x01, 0x0a, 0x08, 0x4f, 0x76, 0x65, 0x72, 0x6c,
+ 0x6f, 0x61, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x5f,
+ 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f,
+ 0x61, 0x64, 0x49, 0x64, 0x12, 0x26, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x02,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+ 0x54, 0x79, 0x70, 0x65, 0x52, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x1f, 0x0a, 0x0b,
+ 0x74, 0x79, 0x70, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28,
+ 0x09, 0x52, 0x0a, 0x74, 0x79, 0x70, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x2f, 0x0a,
+ 0x0b, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79,
+ 0x70, 0x65, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x30,
+ 0x0a, 0x14, 0x69, 0x73, 0x5f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x66, 0x75,
+ 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x69, 0x73,
+ 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x12, 0x10, 0x0a, 0x03, 0x64, 0x6f, 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x64,
+ 0x6f, 0x63, 0x42, 0x0b, 0x0a, 0x09, 0x64, 0x65, 0x63, 0x6c, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22,
+ 0x6a, 0x0a, 0x09, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18,
+ 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x49,
+ 0x64, 0x12, 0x28, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x73,
+ 0x74, 0x61, 0x6e, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x2c, 0x0a, 0x0c, 0x64,
+ 0x65, 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x09, 0x44, 0x65, 0x63,
+ 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, 0x65,
+ 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x33,
+}
+
+var (
+ file_cel_expr_checked_proto_rawDescOnce sync.Once
+ file_cel_expr_checked_proto_rawDescData = file_cel_expr_checked_proto_rawDesc
+)
+
+func file_cel_expr_checked_proto_rawDescGZIP() []byte {
+ file_cel_expr_checked_proto_rawDescOnce.Do(func() {
+ file_cel_expr_checked_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_checked_proto_rawDescData)
+ })
+ return file_cel_expr_checked_proto_rawDescData
+}
+
+var file_cel_expr_checked_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
+var file_cel_expr_checked_proto_msgTypes = make([]protoimpl.MessageInfo, 13)
+var file_cel_expr_checked_proto_goTypes = []interface{}{
+ (Type_PrimitiveType)(0), // 0: cel.expr.Type.PrimitiveType
+ (Type_WellKnownType)(0), // 1: cel.expr.Type.WellKnownType
+ (*CheckedExpr)(nil), // 2: cel.expr.CheckedExpr
+ (*Type)(nil), // 3: cel.expr.Type
+ (*Decl)(nil), // 4: cel.expr.Decl
+ (*Reference)(nil), // 5: cel.expr.Reference
+ nil, // 6: cel.expr.CheckedExpr.ReferenceMapEntry
+ nil, // 7: cel.expr.CheckedExpr.TypeMapEntry
+ (*Type_ListType)(nil), // 8: cel.expr.Type.ListType
+ (*Type_MapType)(nil), // 9: cel.expr.Type.MapType
+ (*Type_FunctionType)(nil), // 10: cel.expr.Type.FunctionType
+ (*Type_AbstractType)(nil), // 11: cel.expr.Type.AbstractType
+ (*Decl_IdentDecl)(nil), // 12: cel.expr.Decl.IdentDecl
+ (*Decl_FunctionDecl)(nil), // 13: cel.expr.Decl.FunctionDecl
+ (*Decl_FunctionDecl_Overload)(nil), // 14: cel.expr.Decl.FunctionDecl.Overload
+ (*SourceInfo)(nil), // 15: cel.expr.SourceInfo
+ (*Expr)(nil), // 16: cel.expr.Expr
+ (*emptypb.Empty)(nil), // 17: google.protobuf.Empty
+ (structpb.NullValue)(0), // 18: google.protobuf.NullValue
+ (*Constant)(nil), // 19: cel.expr.Constant
+}
+var file_cel_expr_checked_proto_depIdxs = []int32{
+ 6, // 0: cel.expr.CheckedExpr.reference_map:type_name -> cel.expr.CheckedExpr.ReferenceMapEntry
+ 7, // 1: cel.expr.CheckedExpr.type_map:type_name -> cel.expr.CheckedExpr.TypeMapEntry
+ 15, // 2: cel.expr.CheckedExpr.source_info:type_name -> cel.expr.SourceInfo
+ 16, // 3: cel.expr.CheckedExpr.expr:type_name -> cel.expr.Expr
+ 17, // 4: cel.expr.Type.dyn:type_name -> google.protobuf.Empty
+ 18, // 5: cel.expr.Type.null:type_name -> google.protobuf.NullValue
+ 0, // 6: cel.expr.Type.primitive:type_name -> cel.expr.Type.PrimitiveType
+ 0, // 7: cel.expr.Type.wrapper:type_name -> cel.expr.Type.PrimitiveType
+ 1, // 8: cel.expr.Type.well_known:type_name -> cel.expr.Type.WellKnownType
+ 8, // 9: cel.expr.Type.list_type:type_name -> cel.expr.Type.ListType
+ 9, // 10: cel.expr.Type.map_type:type_name -> cel.expr.Type.MapType
+ 10, // 11: cel.expr.Type.function:type_name -> cel.expr.Type.FunctionType
+ 3, // 12: cel.expr.Type.type:type_name -> cel.expr.Type
+ 17, // 13: cel.expr.Type.error:type_name -> google.protobuf.Empty
+ 11, // 14: cel.expr.Type.abstract_type:type_name -> cel.expr.Type.AbstractType
+ 12, // 15: cel.expr.Decl.ident:type_name -> cel.expr.Decl.IdentDecl
+ 13, // 16: cel.expr.Decl.function:type_name -> cel.expr.Decl.FunctionDecl
+ 19, // 17: cel.expr.Reference.value:type_name -> cel.expr.Constant
+ 5, // 18: cel.expr.CheckedExpr.ReferenceMapEntry.value:type_name -> cel.expr.Reference
+ 3, // 19: cel.expr.CheckedExpr.TypeMapEntry.value:type_name -> cel.expr.Type
+ 3, // 20: cel.expr.Type.ListType.elem_type:type_name -> cel.expr.Type
+ 3, // 21: cel.expr.Type.MapType.key_type:type_name -> cel.expr.Type
+ 3, // 22: cel.expr.Type.MapType.value_type:type_name -> cel.expr.Type
+ 3, // 23: cel.expr.Type.FunctionType.result_type:type_name -> cel.expr.Type
+ 3, // 24: cel.expr.Type.FunctionType.arg_types:type_name -> cel.expr.Type
+ 3, // 25: cel.expr.Type.AbstractType.parameter_types:type_name -> cel.expr.Type
+ 3, // 26: cel.expr.Decl.IdentDecl.type:type_name -> cel.expr.Type
+ 19, // 27: cel.expr.Decl.IdentDecl.value:type_name -> cel.expr.Constant
+ 14, // 28: cel.expr.Decl.FunctionDecl.overloads:type_name -> cel.expr.Decl.FunctionDecl.Overload
+ 3, // 29: cel.expr.Decl.FunctionDecl.Overload.params:type_name -> cel.expr.Type
+ 3, // 30: cel.expr.Decl.FunctionDecl.Overload.result_type:type_name -> cel.expr.Type
+ 31, // [31:31] is the sub-list for method output_type
+ 31, // [31:31] is the sub-list for method input_type
+ 31, // [31:31] is the sub-list for extension type_name
+ 31, // [31:31] is the sub-list for extension extendee
+ 0, // [0:31] is the sub-list for field type_name
+}
+
+func init() { file_cel_expr_checked_proto_init() }
+func file_cel_expr_checked_proto_init() {
+ if File_cel_expr_checked_proto != nil {
+ return
+ }
+ file_cel_expr_syntax_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_cel_expr_checked_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CheckedExpr); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Type); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Decl); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Reference); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Type_ListType); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Type_MapType); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Type_FunctionType); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Type_AbstractType); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Decl_IdentDecl); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Decl_FunctionDecl); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Decl_FunctionDecl_Overload); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[1].OneofWrappers = []interface{}{
+ (*Type_Dyn)(nil),
+ (*Type_Null)(nil),
+ (*Type_Primitive)(nil),
+ (*Type_Wrapper)(nil),
+ (*Type_WellKnown)(nil),
+ (*Type_ListType_)(nil),
+ (*Type_MapType_)(nil),
+ (*Type_Function)(nil),
+ (*Type_MessageType)(nil),
+ (*Type_TypeParam)(nil),
+ (*Type_Type)(nil),
+ (*Type_Error)(nil),
+ (*Type_AbstractType_)(nil),
+ }
+ file_cel_expr_checked_proto_msgTypes[2].OneofWrappers = []interface{}{
+ (*Decl_Ident)(nil),
+ (*Decl_Function)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_cel_expr_checked_proto_rawDesc,
+ NumEnums: 2,
+ NumMessages: 13,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_cel_expr_checked_proto_goTypes,
+ DependencyIndexes: file_cel_expr_checked_proto_depIdxs,
+ EnumInfos: file_cel_expr_checked_proto_enumTypes,
+ MessageInfos: file_cel_expr_checked_proto_msgTypes,
+ }.Build()
+ File_cel_expr_checked_proto = out.File
+ file_cel_expr_checked_proto_rawDesc = nil
+ file_cel_expr_checked_proto_goTypes = nil
+ file_cel_expr_checked_proto_depIdxs = nil
+}
diff --git a/vendor/cel.dev/expr/cloudbuild.yaml b/vendor/cel.dev/expr/cloudbuild.yaml
new file mode 100644
index 000000000..c40881f12
--- /dev/null
+++ b/vendor/cel.dev/expr/cloudbuild.yaml
@@ -0,0 +1,9 @@
+steps:
+- name: 'gcr.io/cloud-builders/bazel:7.0.1'
+ entrypoint: bazel
+ args: ['build', '...']
+ id: bazel-build
+ waitFor: ['-']
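+  # waitFor: ['-'] starts this step immediately at the beginning of the
+  # build, rather than waiting for any previously listed steps.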
+timeout: 15m
+options:
+ machineType: 'N1_HIGHCPU_32'
diff --git a/vendor/cel.dev/expr/eval.pb.go b/vendor/cel.dev/expr/eval.pb.go
new file mode 100644
index 000000000..8f651f9cc
--- /dev/null
+++ b/vendor/cel.dev/expr/eval.pb.go
@@ -0,0 +1,490 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.28.1
+// protoc v3.21.5
+// source: cel/expr/eval.proto
+
+package expr
+
+import (
+ status "google.golang.org/genproto/googleapis/rpc/status"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
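+// EvalState mirrors the cel.expr.EvalState message: a shared list of
+// evaluated Values plus Results that map expression ids to indexes into
+// that list, so identical values can be shared across results.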
+type EvalState struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Values []*ExprValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+ Results []*EvalState_Result `protobuf:"bytes,3,rep,name=results,proto3" json:"results,omitempty"`
+}
+
+func (x *EvalState) Reset() {
+ *x = EvalState{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_eval_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *EvalState) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EvalState) ProtoMessage() {}
+
+func (x *EvalState) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_eval_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use EvalState.ProtoReflect.Descriptor instead.
+func (*EvalState) Descriptor() ([]byte, []int) {
+ return file_cel_expr_eval_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *EvalState) GetValues() []*ExprValue {
+ if x != nil {
+ return x.Values
+ }
+ return nil
+}
+
+func (x *EvalState) GetResults() []*EvalState_Result {
+ if x != nil {
+ return x.Results
+ }
+ return nil
+}
+
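+// ExprValue mirrors the cel.expr.ExprValue message: the outcome of
+// evaluating an expression, which is exactly one of a concrete Value, an
+// ErrorSet, or an UnknownSet of expression ids that could not be resolved.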
+type ExprValue struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to Kind:
+ //
+ // *ExprValue_Value
+ // *ExprValue_Error
+ // *ExprValue_Unknown
+ Kind isExprValue_Kind `protobuf_oneof:"kind"`
+}
+
+func (x *ExprValue) Reset() {
+ *x = ExprValue{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_eval_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ExprValue) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExprValue) ProtoMessage() {}
+
+func (x *ExprValue) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_eval_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExprValue.ProtoReflect.Descriptor instead.
+func (*ExprValue) Descriptor() ([]byte, []int) {
+ return file_cel_expr_eval_proto_rawDescGZIP(), []int{1}
+}
+
+func (m *ExprValue) GetKind() isExprValue_Kind {
+ if m != nil {
+ return m.Kind
+ }
+ return nil
+}
+
+func (x *ExprValue) GetValue() *Value {
+ if x, ok := x.GetKind().(*ExprValue_Value); ok {
+ return x.Value
+ }
+ return nil
+}
+
+func (x *ExprValue) GetError() *ErrorSet {
+ if x, ok := x.GetKind().(*ExprValue_Error); ok {
+ return x.Error
+ }
+ return nil
+}
+
+func (x *ExprValue) GetUnknown() *UnknownSet {
+ if x, ok := x.GetKind().(*ExprValue_Unknown); ok {
+ return x.Unknown
+ }
+ return nil
+}
+
+type isExprValue_Kind interface {
+ isExprValue_Kind()
+}
+
+type ExprValue_Value struct {
+ Value *Value `protobuf:"bytes,1,opt,name=value,proto3,oneof"`
+}
+
+type ExprValue_Error struct {
+ Error *ErrorSet `protobuf:"bytes,2,opt,name=error,proto3,oneof"`
+}
+
+type ExprValue_Unknown struct {
+ Unknown *UnknownSet `protobuf:"bytes,3,opt,name=unknown,proto3,oneof"`
+}
+
+func (*ExprValue_Value) isExprValue_Kind() {}
+
+func (*ExprValue_Error) isExprValue_Kind() {}
+
+func (*ExprValue_Unknown) isExprValue_Kind() {}
+
+type ErrorSet struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Errors []*status.Status `protobuf:"bytes,1,rep,name=errors,proto3" json:"errors,omitempty"`
+}
+
+func (x *ErrorSet) Reset() {
+ *x = ErrorSet{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_eval_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ErrorSet) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ErrorSet) ProtoMessage() {}
+
+func (x *ErrorSet) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_eval_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ErrorSet.ProtoReflect.Descriptor instead.
+func (*ErrorSet) Descriptor() ([]byte, []int) {
+ return file_cel_expr_eval_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ErrorSet) GetErrors() []*status.Status {
+ if x != nil {
+ return x.Errors
+ }
+ return nil
+}
+
+type UnknownSet struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Exprs []int64 `protobuf:"varint,1,rep,packed,name=exprs,proto3" json:"exprs,omitempty"`
+}
+
+func (x *UnknownSet) Reset() {
+ *x = UnknownSet{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_eval_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *UnknownSet) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UnknownSet) ProtoMessage() {}
+
+func (x *UnknownSet) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_eval_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UnknownSet.ProtoReflect.Descriptor instead.
+func (*UnknownSet) Descriptor() ([]byte, []int) {
+ return file_cel_expr_eval_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *UnknownSet) GetExprs() []int64 {
+ if x != nil {
+ return x.Exprs
+ }
+ return nil
+}
+
+type EvalState_Result struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Expr int64 `protobuf:"varint,1,opt,name=expr,proto3" json:"expr,omitempty"`
+ Value int64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *EvalState_Result) Reset() {
+ *x = EvalState_Result{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_eval_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *EvalState_Result) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EvalState_Result) ProtoMessage() {}
+
+func (x *EvalState_Result) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_eval_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use EvalState_Result.ProtoReflect.Descriptor instead.
+func (*EvalState_Result) Descriptor() ([]byte, []int) {
+ return file_cel_expr_eval_proto_rawDescGZIP(), []int{0, 0}
+}
+
+func (x *EvalState_Result) GetExpr() int64 {
+ if x != nil {
+ return x.Expr
+ }
+ return 0
+}
+
+func (x *EvalState_Result) GetValue() int64 {
+ if x != nil {
+ return x.Value
+ }
+ return 0
+}
+
+var File_cel_expr_eval_proto protoreflect.FileDescriptor
+
+var file_cel_expr_eval_proto_rawDesc = []byte{
+ 0x0a, 0x13, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x65, 0x76, 0x61, 0x6c, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x1a,
+ 0x14, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70,
+ 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa2,
+ 0x01, 0x0a, 0x09, 0x45, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2b, 0x0a, 0x06,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63,
+ 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x07, 0x72, 0x65, 0x73,
+ 0x75, 0x6c, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x65, 0x6c,
+ 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e,
+ 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x1a,
+ 0x32, 0x0a, 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x78, 0x70,
+ 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x65, 0x78, 0x70, 0x72, 0x12, 0x14, 0x0a,
+ 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x22, 0x9a, 0x01, 0x0a, 0x09, 0x45, 0x78, 0x70, 0x72, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x48, 0x00, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72,
+ 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c, 0x2e,
+ 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x48, 0x00, 0x52,
+ 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x30, 0x0a, 0x07, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77,
+ 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78,
+ 0x70, 0x72, 0x2e, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x53, 0x65, 0x74, 0x48, 0x00, 0x52,
+ 0x07, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64,
+ 0x22, 0x36, 0x0a, 0x08, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x12, 0x2a, 0x0a, 0x06,
+ 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x52, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x22, 0x22, 0x0a, 0x0a, 0x55, 0x6e, 0x6b, 0x6e,
+ 0x6f, 0x77, 0x6e, 0x53, 0x65, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x78, 0x70, 0x72, 0x73, 0x18,
+ 0x01, 0x20, 0x03, 0x28, 0x03, 0x52, 0x05, 0x65, 0x78, 0x70, 0x72, 0x73, 0x42, 0x2c, 0x0a, 0x0c,
+ 0x64, 0x65, 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x09, 0x45, 0x76,
+ 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64,
+ 0x65, 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x33,
+}
+
+var (
+ file_cel_expr_eval_proto_rawDescOnce sync.Once
+ file_cel_expr_eval_proto_rawDescData = file_cel_expr_eval_proto_rawDesc
+)
+
+func file_cel_expr_eval_proto_rawDescGZIP() []byte {
+ file_cel_expr_eval_proto_rawDescOnce.Do(func() {
+ file_cel_expr_eval_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_eval_proto_rawDescData)
+ })
+ return file_cel_expr_eval_proto_rawDescData
+}
+
+var file_cel_expr_eval_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
+var file_cel_expr_eval_proto_goTypes = []interface{}{
+ (*EvalState)(nil), // 0: cel.expr.EvalState
+ (*ExprValue)(nil), // 1: cel.expr.ExprValue
+ (*ErrorSet)(nil), // 2: cel.expr.ErrorSet
+ (*UnknownSet)(nil), // 3: cel.expr.UnknownSet
+ (*EvalState_Result)(nil), // 4: cel.expr.EvalState.Result
+ (*Value)(nil), // 5: cel.expr.Value
+ (*status.Status)(nil), // 6: google.rpc.Status
+}
+var file_cel_expr_eval_proto_depIdxs = []int32{
+ 1, // 0: cel.expr.EvalState.values:type_name -> cel.expr.ExprValue
+ 4, // 1: cel.expr.EvalState.results:type_name -> cel.expr.EvalState.Result
+ 5, // 2: cel.expr.ExprValue.value:type_name -> cel.expr.Value
+ 2, // 3: cel.expr.ExprValue.error:type_name -> cel.expr.ErrorSet
+ 3, // 4: cel.expr.ExprValue.unknown:type_name -> cel.expr.UnknownSet
+ 6, // 5: cel.expr.ErrorSet.errors:type_name -> google.rpc.Status
+ 6, // [6:6] is the sub-list for method output_type
+ 6, // [6:6] is the sub-list for method input_type
+ 6, // [6:6] is the sub-list for extension type_name
+ 6, // [6:6] is the sub-list for extension extendee
+ 0, // [0:6] is the sub-list for field type_name
+}
+
+func init() { file_cel_expr_eval_proto_init() }
+func file_cel_expr_eval_proto_init() {
+ if File_cel_expr_eval_proto != nil {
+ return
+ }
+ file_cel_expr_value_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_cel_expr_eval_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*EvalState); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_eval_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ExprValue); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_eval_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ErrorSet); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_eval_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*UnknownSet); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_eval_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*EvalState_Result); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_cel_expr_eval_proto_msgTypes[1].OneofWrappers = []interface{}{
+ (*ExprValue_Value)(nil),
+ (*ExprValue_Error)(nil),
+ (*ExprValue_Unknown)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_cel_expr_eval_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 5,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_cel_expr_eval_proto_goTypes,
+ DependencyIndexes: file_cel_expr_eval_proto_depIdxs,
+ MessageInfos: file_cel_expr_eval_proto_msgTypes,
+ }.Build()
+ File_cel_expr_eval_proto = out.File
+ file_cel_expr_eval_proto_rawDesc = nil
+ file_cel_expr_eval_proto_goTypes = nil
+ file_cel_expr_eval_proto_depIdxs = nil
+}
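
Editor's note on consuming the vendored API above: the ExprValue oneof is read through the generated Get* accessors, which return nil for the unset cases. The following is a minimal, self-contained sketch, not part of the patch; the status code/message are illustrative, and it assumes the standard google.golang.org/genproto import for google.rpc.Status.

package main

import (
	"fmt"

	expr "cel.dev/expr"
	status "google.golang.org/genproto/googleapis/rpc/status"
)

func main() {
	// Wrap an evaluation failure in the ExprValue oneof. Code 3 is
	// google.rpc.Code INVALID_ARGUMENT; both values are illustrative.
	ev := &expr.ExprValue{
		Kind: &expr.ExprValue_Error{
			Error: &expr.ErrorSet{
				Errors: []*status.Status{{Code: 3, Message: "division by zero"}},
			},
		},
	}

	// The generated Get* accessors return nil for the unset oneof
	// cases, so callers can branch without a type switch.
	switch {
	case ev.GetValue() != nil:
		fmt.Println("value:", ev.GetValue())
	case ev.GetError() != nil:
		fmt.Println("error:", ev.GetError().GetErrors()[0].GetMessage())
	case ev.GetUnknown() != nil:
		fmt.Println("unknown exprs:", ev.GetUnknown().GetExprs())
	}
}
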
diff --git a/vendor/cel.dev/expr/explain.pb.go b/vendor/cel.dev/expr/explain.pb.go
new file mode 100644
index 000000000..79fd5443b
--- /dev/null
+++ b/vendor/cel.dev/expr/explain.pb.go
@@ -0,0 +1,236 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.28.1
+// protoc v3.21.5
+// source: cel/expr/explain.proto
+
+package expr
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Deprecated: Do not use.
+type Explain struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+ ExprSteps []*Explain_ExprStep `protobuf:"bytes,2,rep,name=expr_steps,json=exprSteps,proto3" json:"expr_steps,omitempty"`
+}
+
+func (x *Explain) Reset() {
+ *x = Explain{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_explain_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Explain) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Explain) ProtoMessage() {}
+
+func (x *Explain) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_explain_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Explain.ProtoReflect.Descriptor instead.
+func (*Explain) Descriptor() ([]byte, []int) {
+ return file_cel_expr_explain_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Explain) GetValues() []*Value {
+ if x != nil {
+ return x.Values
+ }
+ return nil
+}
+
+func (x *Explain) GetExprSteps() []*Explain_ExprStep {
+ if x != nil {
+ return x.ExprSteps
+ }
+ return nil
+}
+
+type Explain_ExprStep struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
+ ValueIndex int32 `protobuf:"varint,2,opt,name=value_index,json=valueIndex,proto3" json:"value_index,omitempty"`
+}
+
+func (x *Explain_ExprStep) Reset() {
+ *x = Explain_ExprStep{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_explain_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Explain_ExprStep) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Explain_ExprStep) ProtoMessage() {}
+
+func (x *Explain_ExprStep) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_explain_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Explain_ExprStep.ProtoReflect.Descriptor instead.
+func (*Explain_ExprStep) Descriptor() ([]byte, []int) {
+ return file_cel_expr_explain_proto_rawDescGZIP(), []int{0, 0}
+}
+
+func (x *Explain_ExprStep) GetId() int64 {
+ if x != nil {
+ return x.Id
+ }
+ return 0
+}
+
+func (x *Explain_ExprStep) GetValueIndex() int32 {
+ if x != nil {
+ return x.ValueIndex
+ }
+ return 0
+}
+
+var File_cel_expr_explain_proto protoreflect.FileDescriptor
+
+var file_cel_expr_explain_proto_rawDesc = []byte{
+ 0x0a, 0x16, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x65, 0x78, 0x70, 0x6c, 0x61,
+ 0x69, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78,
+ 0x70, 0x72, 0x1a, 0x14, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xae, 0x01, 0x0a, 0x07, 0x45, 0x78, 0x70,
+ 0x6c, 0x61, 0x69, 0x6e, 0x12, 0x27, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+ 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x39, 0x0a,
+ 0x0a, 0x65, 0x78, 0x70, 0x72, 0x5f, 0x73, 0x74, 0x65, 0x70, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70,
+ 0x6c, 0x61, 0x69, 0x6e, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x53, 0x74, 0x65, 0x70, 0x52, 0x09, 0x65,
+ 0x78, 0x70, 0x72, 0x53, 0x74, 0x65, 0x70, 0x73, 0x1a, 0x3b, 0x0a, 0x08, 0x45, 0x78, 0x70, 0x72,
+ 0x53, 0x74, 0x65, 0x70, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03,
+ 0x52, 0x02, 0x69, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x69, 0x6e,
+ 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x49, 0x6e, 0x64, 0x65, 0x78, 0x3a, 0x02, 0x18, 0x01, 0x42, 0x2f, 0x0a, 0x0c, 0x64, 0x65, 0x76,
+ 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x0c, 0x45, 0x78, 0x70, 0x6c, 0x61,
+ 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64,
+ 0x65, 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x33,
+}
+
+var (
+ file_cel_expr_explain_proto_rawDescOnce sync.Once
+ file_cel_expr_explain_proto_rawDescData = file_cel_expr_explain_proto_rawDesc
+)
+
+func file_cel_expr_explain_proto_rawDescGZIP() []byte {
+ file_cel_expr_explain_proto_rawDescOnce.Do(func() {
+ file_cel_expr_explain_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_explain_proto_rawDescData)
+ })
+ return file_cel_expr_explain_proto_rawDescData
+}
+
+var file_cel_expr_explain_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_cel_expr_explain_proto_goTypes = []interface{}{
+ (*Explain)(nil), // 0: cel.expr.Explain
+ (*Explain_ExprStep)(nil), // 1: cel.expr.Explain.ExprStep
+ (*Value)(nil), // 2: cel.expr.Value
+}
+var file_cel_expr_explain_proto_depIdxs = []int32{
+ 2, // 0: cel.expr.Explain.values:type_name -> cel.expr.Value
+ 1, // 1: cel.expr.Explain.expr_steps:type_name -> cel.expr.Explain.ExprStep
+ 2, // [2:2] is the sub-list for method output_type
+ 2, // [2:2] is the sub-list for method input_type
+ 2, // [2:2] is the sub-list for extension type_name
+ 2, // [2:2] is the sub-list for extension extendee
+ 0, // [0:2] is the sub-list for field type_name
+}
+
+func init() { file_cel_expr_explain_proto_init() }
+func file_cel_expr_explain_proto_init() {
+ if File_cel_expr_explain_proto != nil {
+ return
+ }
+ file_cel_expr_value_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_cel_expr_explain_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Explain); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_explain_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Explain_ExprStep); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_cel_expr_explain_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_cel_expr_explain_proto_goTypes,
+ DependencyIndexes: file_cel_expr_explain_proto_depIdxs,
+ MessageInfos: file_cel_expr_explain_proto_msgTypes,
+ }.Build()
+ File_cel_expr_explain_proto = out.File
+ file_cel_expr_explain_proto_rawDesc = nil
+ file_cel_expr_explain_proto_goTypes = nil
+ file_cel_expr_explain_proto_depIdxs = nil
+}
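
Editor's note: Explain is marked deprecated upstream, but its accessors follow the same generated pattern as the other vendored types (repeated fields come back as plain slices). A hypothetical sketch of walking a recorded trace, again not part of the patch:

package main

import (
	"fmt"

	expr "cel.dev/expr"
)

// walkExplain prints each evaluation step recorded in an Explain trace.
// Each ExprStep points into the shared Values slice via ValueIndex.
func walkExplain(e *expr.Explain) {
	for _, step := range e.GetExprSteps() {
		v := e.GetValues()[step.GetValueIndex()]
		fmt.Printf("expr %d evaluated to %v\n", step.GetId(), v)
	}
}

func main() {
	walkExplain(&expr.Explain{}) // empty trace: prints nothing
}
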
diff --git a/vendor/cel.dev/expr/regen_go_proto.sh b/vendor/cel.dev/expr/regen_go_proto.sh
new file mode 100644
index 000000000..fdcbb3ce2
--- /dev/null
+++ b/vendor/cel.dev/expr/regen_go_proto.sh
@@ -0,0 +1,9 @@
+#!/usr/bin/env bash
+bazel build //proto/cel/expr/conformance/...
+files=($(bazel aquery 'kind(proto, //proto/cel/expr/conformance/...)' | grep Outputs | grep "[.]pb[.]go" | sed 's/Outputs: \[//' | sed 's/\]//' | tr "," "\n"))
+for src in "${files[@]}";
+do
+  dst=$(echo "$src" | sed 's/\(.*\/cel.dev\/expr\/\(.*\)\)/\2/')
+  echo "copying $dst"
+  cp "$src" "$dst"
+done
diff --git a/vendor/cel.dev/expr/regen_go_proto_canonical_protos.sh b/vendor/cel.dev/expr/regen_go_proto_canonical_protos.sh
new file mode 100644
index 000000000..9a13479e4
--- /dev/null
+++ b/vendor/cel.dev/expr/regen_go_proto_canonical_protos.sh
@@ -0,0 +1,10 @@
+#!/usr/bin/env bash
+bazel build //proto/cel/expr:all
+
+rm -vf ./*.pb.go
+
+files=( $(bazel cquery //proto/cel/expr:expr_go_proto --output=starlark --starlark:expr="'\n'.join([f.path for f in target.output_groups.go_generated_srcs.to_list()])") )
+for src in "${files[@]}";
+do
+ cp -v "${src}" ./
+done
diff --git a/vendor/cel.dev/expr/syntax.pb.go b/vendor/cel.dev/expr/syntax.pb.go
new file mode 100644
index 000000000..48a952872
--- /dev/null
+++ b/vendor/cel.dev/expr/syntax.pb.go
@@ -0,0 +1,1633 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.28.1
+// protoc v3.21.5
+// source: cel/expr/syntax.proto
+
+package expr
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ durationpb "google.golang.org/protobuf/types/known/durationpb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type SourceInfo_Extension_Component int32
+
+const (
+ SourceInfo_Extension_COMPONENT_UNSPECIFIED SourceInfo_Extension_Component = 0
+ SourceInfo_Extension_COMPONENT_PARSER SourceInfo_Extension_Component = 1
+ SourceInfo_Extension_COMPONENT_TYPE_CHECKER SourceInfo_Extension_Component = 2
+ SourceInfo_Extension_COMPONENT_RUNTIME SourceInfo_Extension_Component = 3
+)
+
+// Enum value maps for SourceInfo_Extension_Component.
+var (
+ SourceInfo_Extension_Component_name = map[int32]string{
+ 0: "COMPONENT_UNSPECIFIED",
+ 1: "COMPONENT_PARSER",
+ 2: "COMPONENT_TYPE_CHECKER",
+ 3: "COMPONENT_RUNTIME",
+ }
+ SourceInfo_Extension_Component_value = map[string]int32{
+ "COMPONENT_UNSPECIFIED": 0,
+ "COMPONENT_PARSER": 1,
+ "COMPONENT_TYPE_CHECKER": 2,
+ "COMPONENT_RUNTIME": 3,
+ }
+)
+
+func (x SourceInfo_Extension_Component) Enum() *SourceInfo_Extension_Component {
+ p := new(SourceInfo_Extension_Component)
+ *p = x
+ return p
+}
+
+func (x SourceInfo_Extension_Component) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (SourceInfo_Extension_Component) Descriptor() protoreflect.EnumDescriptor {
+ return file_cel_expr_syntax_proto_enumTypes[0].Descriptor()
+}
+
+func (SourceInfo_Extension_Component) Type() protoreflect.EnumType {
+ return &file_cel_expr_syntax_proto_enumTypes[0]
+}
+
+func (x SourceInfo_Extension_Component) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use SourceInfo_Extension_Component.Descriptor instead.
+func (SourceInfo_Extension_Component) EnumDescriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{3, 2, 0}
+}
+
+type ParsedExpr struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Expr *Expr `protobuf:"bytes,2,opt,name=expr,proto3" json:"expr,omitempty"`
+ SourceInfo *SourceInfo `protobuf:"bytes,3,opt,name=source_info,json=sourceInfo,proto3" json:"source_info,omitempty"`
+}
+
+func (x *ParsedExpr) Reset() {
+ *x = ParsedExpr{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ParsedExpr) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ParsedExpr) ProtoMessage() {}
+
+func (x *ParsedExpr) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ParsedExpr.ProtoReflect.Descriptor instead.
+func (*ParsedExpr) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ParsedExpr) GetExpr() *Expr {
+ if x != nil {
+ return x.Expr
+ }
+ return nil
+}
+
+func (x *ParsedExpr) GetSourceInfo() *SourceInfo {
+ if x != nil {
+ return x.SourceInfo
+ }
+ return nil
+}
+
+type Expr struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id int64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"`
+ // Types that are assignable to ExprKind:
+ //
+ // *Expr_ConstExpr
+ // *Expr_IdentExpr
+ // *Expr_SelectExpr
+ // *Expr_CallExpr
+ // *Expr_ListExpr
+ // *Expr_StructExpr
+ // *Expr_ComprehensionExpr
+ ExprKind isExpr_ExprKind `protobuf_oneof:"expr_kind"`
+}
+
+func (x *Expr) Reset() {
+ *x = Expr{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr) ProtoMessage() {}
+
+func (x *Expr) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr.ProtoReflect.Descriptor instead.
+func (*Expr) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *Expr) GetId() int64 {
+ if x != nil {
+ return x.Id
+ }
+ return 0
+}
+
+func (m *Expr) GetExprKind() isExpr_ExprKind {
+ if m != nil {
+ return m.ExprKind
+ }
+ return nil
+}
+
+func (x *Expr) GetConstExpr() *Constant {
+ if x, ok := x.GetExprKind().(*Expr_ConstExpr); ok {
+ return x.ConstExpr
+ }
+ return nil
+}
+
+func (x *Expr) GetIdentExpr() *Expr_Ident {
+ if x, ok := x.GetExprKind().(*Expr_IdentExpr); ok {
+ return x.IdentExpr
+ }
+ return nil
+}
+
+func (x *Expr) GetSelectExpr() *Expr_Select {
+ if x, ok := x.GetExprKind().(*Expr_SelectExpr); ok {
+ return x.SelectExpr
+ }
+ return nil
+}
+
+func (x *Expr) GetCallExpr() *Expr_Call {
+ if x, ok := x.GetExprKind().(*Expr_CallExpr); ok {
+ return x.CallExpr
+ }
+ return nil
+}
+
+func (x *Expr) GetListExpr() *Expr_CreateList {
+ if x, ok := x.GetExprKind().(*Expr_ListExpr); ok {
+ return x.ListExpr
+ }
+ return nil
+}
+
+func (x *Expr) GetStructExpr() *Expr_CreateStruct {
+ if x, ok := x.GetExprKind().(*Expr_StructExpr); ok {
+ return x.StructExpr
+ }
+ return nil
+}
+
+func (x *Expr) GetComprehensionExpr() *Expr_Comprehension {
+ if x, ok := x.GetExprKind().(*Expr_ComprehensionExpr); ok {
+ return x.ComprehensionExpr
+ }
+ return nil
+}
+
+type isExpr_ExprKind interface {
+ isExpr_ExprKind()
+}
+
+type Expr_ConstExpr struct {
+ ConstExpr *Constant `protobuf:"bytes,3,opt,name=const_expr,json=constExpr,proto3,oneof"`
+}
+
+type Expr_IdentExpr struct {
+ IdentExpr *Expr_Ident `protobuf:"bytes,4,opt,name=ident_expr,json=identExpr,proto3,oneof"`
+}
+
+type Expr_SelectExpr struct {
+ SelectExpr *Expr_Select `protobuf:"bytes,5,opt,name=select_expr,json=selectExpr,proto3,oneof"`
+}
+
+type Expr_CallExpr struct {
+ CallExpr *Expr_Call `protobuf:"bytes,6,opt,name=call_expr,json=callExpr,proto3,oneof"`
+}
+
+type Expr_ListExpr struct {
+ ListExpr *Expr_CreateList `protobuf:"bytes,7,opt,name=list_expr,json=listExpr,proto3,oneof"`
+}
+
+type Expr_StructExpr struct {
+ StructExpr *Expr_CreateStruct `protobuf:"bytes,8,opt,name=struct_expr,json=structExpr,proto3,oneof"`
+}
+
+type Expr_ComprehensionExpr struct {
+ ComprehensionExpr *Expr_Comprehension `protobuf:"bytes,9,opt,name=comprehension_expr,json=comprehensionExpr,proto3,oneof"`
+}
+
+func (*Expr_ConstExpr) isExpr_ExprKind() {}
+
+func (*Expr_IdentExpr) isExpr_ExprKind() {}
+
+func (*Expr_SelectExpr) isExpr_ExprKind() {}
+
+func (*Expr_CallExpr) isExpr_ExprKind() {}
+
+func (*Expr_ListExpr) isExpr_ExprKind() {}
+
+func (*Expr_StructExpr) isExpr_ExprKind() {}
+
+func (*Expr_ComprehensionExpr) isExpr_ExprKind() {}
+
+type Constant struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to ConstantKind:
+ //
+ // *Constant_NullValue
+ // *Constant_BoolValue
+ // *Constant_Int64Value
+ // *Constant_Uint64Value
+ // *Constant_DoubleValue
+ // *Constant_StringValue
+ // *Constant_BytesValue
+ // *Constant_DurationValue
+ // *Constant_TimestampValue
+ ConstantKind isConstant_ConstantKind `protobuf_oneof:"constant_kind"`
+}
+
+func (x *Constant) Reset() {
+ *x = Constant{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Constant) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Constant) ProtoMessage() {}
+
+func (x *Constant) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Constant.ProtoReflect.Descriptor instead.
+func (*Constant) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{2}
+}
+
+func (m *Constant) GetConstantKind() isConstant_ConstantKind {
+ if m != nil {
+ return m.ConstantKind
+ }
+ return nil
+}
+
+func (x *Constant) GetNullValue() structpb.NullValue {
+ if x, ok := x.GetConstantKind().(*Constant_NullValue); ok {
+ return x.NullValue
+ }
+ return structpb.NullValue(0)
+}
+
+func (x *Constant) GetBoolValue() bool {
+ if x, ok := x.GetConstantKind().(*Constant_BoolValue); ok {
+ return x.BoolValue
+ }
+ return false
+}
+
+func (x *Constant) GetInt64Value() int64 {
+ if x, ok := x.GetConstantKind().(*Constant_Int64Value); ok {
+ return x.Int64Value
+ }
+ return 0
+}
+
+func (x *Constant) GetUint64Value() uint64 {
+ if x, ok := x.GetConstantKind().(*Constant_Uint64Value); ok {
+ return x.Uint64Value
+ }
+ return 0
+}
+
+func (x *Constant) GetDoubleValue() float64 {
+ if x, ok := x.GetConstantKind().(*Constant_DoubleValue); ok {
+ return x.DoubleValue
+ }
+ return 0
+}
+
+func (x *Constant) GetStringValue() string {
+ if x, ok := x.GetConstantKind().(*Constant_StringValue); ok {
+ return x.StringValue
+ }
+ return ""
+}
+
+func (x *Constant) GetBytesValue() []byte {
+ if x, ok := x.GetConstantKind().(*Constant_BytesValue); ok {
+ return x.BytesValue
+ }
+ return nil
+}
+
+// Deprecated: Do not use.
+func (x *Constant) GetDurationValue() *durationpb.Duration {
+ if x, ok := x.GetConstantKind().(*Constant_DurationValue); ok {
+ return x.DurationValue
+ }
+ return nil
+}
+
+// Deprecated: Do not use.
+func (x *Constant) GetTimestampValue() *timestamppb.Timestamp {
+ if x, ok := x.GetConstantKind().(*Constant_TimestampValue); ok {
+ return x.TimestampValue
+ }
+ return nil
+}
+
+type isConstant_ConstantKind interface {
+ isConstant_ConstantKind()
+}
+
+type Constant_NullValue struct {
+ NullValue structpb.NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"`
+}
+
+type Constant_BoolValue struct {
+ BoolValue bool `protobuf:"varint,2,opt,name=bool_value,json=boolValue,proto3,oneof"`
+}
+
+type Constant_Int64Value struct {
+ Int64Value int64 `protobuf:"varint,3,opt,name=int64_value,json=int64Value,proto3,oneof"`
+}
+
+type Constant_Uint64Value struct {
+ Uint64Value uint64 `protobuf:"varint,4,opt,name=uint64_value,json=uint64Value,proto3,oneof"`
+}
+
+type Constant_DoubleValue struct {
+ DoubleValue float64 `protobuf:"fixed64,5,opt,name=double_value,json=doubleValue,proto3,oneof"`
+}
+
+type Constant_StringValue struct {
+ StringValue string `protobuf:"bytes,6,opt,name=string_value,json=stringValue,proto3,oneof"`
+}
+
+type Constant_BytesValue struct {
+ BytesValue []byte `protobuf:"bytes,7,opt,name=bytes_value,json=bytesValue,proto3,oneof"`
+}
+
+type Constant_DurationValue struct {
+ // Deprecated: Do not use.
+ DurationValue *durationpb.Duration `protobuf:"bytes,8,opt,name=duration_value,json=durationValue,proto3,oneof"`
+}
+
+type Constant_TimestampValue struct {
+ // Deprecated: Do not use.
+ TimestampValue *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=timestamp_value,json=timestampValue,proto3,oneof"`
+}
+
+func (*Constant_NullValue) isConstant_ConstantKind() {}
+
+func (*Constant_BoolValue) isConstant_ConstantKind() {}
+
+func (*Constant_Int64Value) isConstant_ConstantKind() {}
+
+func (*Constant_Uint64Value) isConstant_ConstantKind() {}
+
+func (*Constant_DoubleValue) isConstant_ConstantKind() {}
+
+func (*Constant_StringValue) isConstant_ConstantKind() {}
+
+func (*Constant_BytesValue) isConstant_ConstantKind() {}
+
+func (*Constant_DurationValue) isConstant_ConstantKind() {}
+
+func (*Constant_TimestampValue) isConstant_ConstantKind() {}
+
+type SourceInfo struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ SyntaxVersion string `protobuf:"bytes,1,opt,name=syntax_version,json=syntaxVersion,proto3" json:"syntax_version,omitempty"`
+ Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"`
+ LineOffsets []int32 `protobuf:"varint,3,rep,packed,name=line_offsets,json=lineOffsets,proto3" json:"line_offsets,omitempty"`
+ Positions map[int64]int32 `protobuf:"bytes,4,rep,name=positions,proto3" json:"positions,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"`
+ MacroCalls map[int64]*Expr `protobuf:"bytes,5,rep,name=macro_calls,json=macroCalls,proto3" json:"macro_calls,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ Extensions []*SourceInfo_Extension `protobuf:"bytes,6,rep,name=extensions,proto3" json:"extensions,omitempty"`
+}
+
+func (x *SourceInfo) Reset() {
+ *x = SourceInfo{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SourceInfo) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SourceInfo) ProtoMessage() {}
+
+func (x *SourceInfo) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SourceInfo.ProtoReflect.Descriptor instead.
+func (*SourceInfo) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *SourceInfo) GetSyntaxVersion() string {
+ if x != nil {
+ return x.SyntaxVersion
+ }
+ return ""
+}
+
+func (x *SourceInfo) GetLocation() string {
+ if x != nil {
+ return x.Location
+ }
+ return ""
+}
+
+func (x *SourceInfo) GetLineOffsets() []int32 {
+ if x != nil {
+ return x.LineOffsets
+ }
+ return nil
+}
+
+func (x *SourceInfo) GetPositions() map[int64]int32 {
+ if x != nil {
+ return x.Positions
+ }
+ return nil
+}
+
+func (x *SourceInfo) GetMacroCalls() map[int64]*Expr {
+ if x != nil {
+ return x.MacroCalls
+ }
+ return nil
+}
+
+func (x *SourceInfo) GetExtensions() []*SourceInfo_Extension {
+ if x != nil {
+ return x.Extensions
+ }
+ return nil
+}
+
+type Expr_Ident struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *Expr_Ident) Reset() {
+ *x = Expr_Ident{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr_Ident) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_Ident) ProtoMessage() {}
+
+func (x *Expr_Ident) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_Ident.ProtoReflect.Descriptor instead.
+func (*Expr_Ident) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 0}
+}
+
+func (x *Expr_Ident) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+type Expr_Select struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Operand *Expr `protobuf:"bytes,1,opt,name=operand,proto3" json:"operand,omitempty"`
+ Field string `protobuf:"bytes,2,opt,name=field,proto3" json:"field,omitempty"`
+ TestOnly bool `protobuf:"varint,3,opt,name=test_only,json=testOnly,proto3" json:"test_only,omitempty"`
+}
+
+func (x *Expr_Select) Reset() {
+ *x = Expr_Select{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr_Select) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_Select) ProtoMessage() {}
+
+func (x *Expr_Select) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_Select.ProtoReflect.Descriptor instead.
+func (*Expr_Select) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 1}
+}
+
+func (x *Expr_Select) GetOperand() *Expr {
+ if x != nil {
+ return x.Operand
+ }
+ return nil
+}
+
+func (x *Expr_Select) GetField() string {
+ if x != nil {
+ return x.Field
+ }
+ return ""
+}
+
+func (x *Expr_Select) GetTestOnly() bool {
+ if x != nil {
+ return x.TestOnly
+ }
+ return false
+}
+
+type Expr_Call struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Target *Expr `protobuf:"bytes,1,opt,name=target,proto3" json:"target,omitempty"`
+ Function string `protobuf:"bytes,2,opt,name=function,proto3" json:"function,omitempty"`
+ Args []*Expr `protobuf:"bytes,3,rep,name=args,proto3" json:"args,omitempty"`
+}
+
+func (x *Expr_Call) Reset() {
+ *x = Expr_Call{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr_Call) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_Call) ProtoMessage() {}
+
+func (x *Expr_Call) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_Call.ProtoReflect.Descriptor instead.
+func (*Expr_Call) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 2}
+}
+
+func (x *Expr_Call) GetTarget() *Expr {
+ if x != nil {
+ return x.Target
+ }
+ return nil
+}
+
+func (x *Expr_Call) GetFunction() string {
+ if x != nil {
+ return x.Function
+ }
+ return ""
+}
+
+func (x *Expr_Call) GetArgs() []*Expr {
+ if x != nil {
+ return x.Args
+ }
+ return nil
+}
+
+type Expr_CreateList struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Elements []*Expr `protobuf:"bytes,1,rep,name=elements,proto3" json:"elements,omitempty"`
+ OptionalIndices []int32 `protobuf:"varint,2,rep,packed,name=optional_indices,json=optionalIndices,proto3" json:"optional_indices,omitempty"`
+}
+
+func (x *Expr_CreateList) Reset() {
+ *x = Expr_CreateList{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr_CreateList) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_CreateList) ProtoMessage() {}
+
+func (x *Expr_CreateList) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_CreateList.ProtoReflect.Descriptor instead.
+func (*Expr_CreateList) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 3}
+}
+
+func (x *Expr_CreateList) GetElements() []*Expr {
+ if x != nil {
+ return x.Elements
+ }
+ return nil
+}
+
+func (x *Expr_CreateList) GetOptionalIndices() []int32 {
+ if x != nil {
+ return x.OptionalIndices
+ }
+ return nil
+}
+
+type Expr_CreateStruct struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ MessageName string `protobuf:"bytes,1,opt,name=message_name,json=messageName,proto3" json:"message_name,omitempty"`
+ Entries []*Expr_CreateStruct_Entry `protobuf:"bytes,2,rep,name=entries,proto3" json:"entries,omitempty"`
+}
+
+func (x *Expr_CreateStruct) Reset() {
+ *x = Expr_CreateStruct{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr_CreateStruct) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_CreateStruct) ProtoMessage() {}
+
+func (x *Expr_CreateStruct) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_CreateStruct.ProtoReflect.Descriptor instead.
+func (*Expr_CreateStruct) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 4}
+}
+
+func (x *Expr_CreateStruct) GetMessageName() string {
+ if x != nil {
+ return x.MessageName
+ }
+ return ""
+}
+
+func (x *Expr_CreateStruct) GetEntries() []*Expr_CreateStruct_Entry {
+ if x != nil {
+ return x.Entries
+ }
+ return nil
+}
+
+type Expr_Comprehension struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ IterVar string `protobuf:"bytes,1,opt,name=iter_var,json=iterVar,proto3" json:"iter_var,omitempty"`
+ IterRange *Expr `protobuf:"bytes,2,opt,name=iter_range,json=iterRange,proto3" json:"iter_range,omitempty"`
+ AccuVar string `protobuf:"bytes,3,opt,name=accu_var,json=accuVar,proto3" json:"accu_var,omitempty"`
+ AccuInit *Expr `protobuf:"bytes,4,opt,name=accu_init,json=accuInit,proto3" json:"accu_init,omitempty"`
+ LoopCondition *Expr `protobuf:"bytes,5,opt,name=loop_condition,json=loopCondition,proto3" json:"loop_condition,omitempty"`
+ LoopStep *Expr `protobuf:"bytes,6,opt,name=loop_step,json=loopStep,proto3" json:"loop_step,omitempty"`
+ Result *Expr `protobuf:"bytes,7,opt,name=result,proto3" json:"result,omitempty"`
+}
+
+func (x *Expr_Comprehension) Reset() {
+ *x = Expr_Comprehension{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr_Comprehension) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_Comprehension) ProtoMessage() {}
+
+func (x *Expr_Comprehension) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_Comprehension.ProtoReflect.Descriptor instead.
+func (*Expr_Comprehension) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 5}
+}
+
+func (x *Expr_Comprehension) GetIterVar() string {
+ if x != nil {
+ return x.IterVar
+ }
+ return ""
+}
+
+func (x *Expr_Comprehension) GetIterRange() *Expr {
+ if x != nil {
+ return x.IterRange
+ }
+ return nil
+}
+
+func (x *Expr_Comprehension) GetAccuVar() string {
+ if x != nil {
+ return x.AccuVar
+ }
+ return ""
+}
+
+func (x *Expr_Comprehension) GetAccuInit() *Expr {
+ if x != nil {
+ return x.AccuInit
+ }
+ return nil
+}
+
+func (x *Expr_Comprehension) GetLoopCondition() *Expr {
+ if x != nil {
+ return x.LoopCondition
+ }
+ return nil
+}
+
+func (x *Expr_Comprehension) GetLoopStep() *Expr {
+ if x != nil {
+ return x.LoopStep
+ }
+ return nil
+}
+
+func (x *Expr_Comprehension) GetResult() *Expr {
+ if x != nil {
+ return x.Result
+ }
+ return nil
+}
+
+type Expr_CreateStruct_Entry struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
+ // Types that are assignable to KeyKind:
+ //
+ // *Expr_CreateStruct_Entry_FieldKey
+ // *Expr_CreateStruct_Entry_MapKey
+ KeyKind isExpr_CreateStruct_Entry_KeyKind `protobuf_oneof:"key_kind"`
+ Value *Expr `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"`
+ OptionalEntry bool `protobuf:"varint,5,opt,name=optional_entry,json=optionalEntry,proto3" json:"optional_entry,omitempty"`
+}
+
+func (x *Expr_CreateStruct_Entry) Reset() {
+ *x = Expr_CreateStruct_Entry{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr_CreateStruct_Entry) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_CreateStruct_Entry) ProtoMessage() {}
+
+func (x *Expr_CreateStruct_Entry) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_CreateStruct_Entry.ProtoReflect.Descriptor instead.
+func (*Expr_CreateStruct_Entry) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 4, 0}
+}
+
+func (x *Expr_CreateStruct_Entry) GetId() int64 {
+ if x != nil {
+ return x.Id
+ }
+ return 0
+}
+
+func (m *Expr_CreateStruct_Entry) GetKeyKind() isExpr_CreateStruct_Entry_KeyKind {
+ if m != nil {
+ return m.KeyKind
+ }
+ return nil
+}
+
+func (x *Expr_CreateStruct_Entry) GetFieldKey() string {
+ if x, ok := x.GetKeyKind().(*Expr_CreateStruct_Entry_FieldKey); ok {
+ return x.FieldKey
+ }
+ return ""
+}
+
+func (x *Expr_CreateStruct_Entry) GetMapKey() *Expr {
+ if x, ok := x.GetKeyKind().(*Expr_CreateStruct_Entry_MapKey); ok {
+ return x.MapKey
+ }
+ return nil
+}
+
+func (x *Expr_CreateStruct_Entry) GetValue() *Expr {
+ if x != nil {
+ return x.Value
+ }
+ return nil
+}
+
+func (x *Expr_CreateStruct_Entry) GetOptionalEntry() bool {
+ if x != nil {
+ return x.OptionalEntry
+ }
+ return false
+}
+
+type isExpr_CreateStruct_Entry_KeyKind interface {
+ isExpr_CreateStruct_Entry_KeyKind()
+}
+
+type Expr_CreateStruct_Entry_FieldKey struct {
+ FieldKey string `protobuf:"bytes,2,opt,name=field_key,json=fieldKey,proto3,oneof"`
+}
+
+type Expr_CreateStruct_Entry_MapKey struct {
+ MapKey *Expr `protobuf:"bytes,3,opt,name=map_key,json=mapKey,proto3,oneof"`
+}
+
+func (*Expr_CreateStruct_Entry_FieldKey) isExpr_CreateStruct_Entry_KeyKind() {}
+
+func (*Expr_CreateStruct_Entry_MapKey) isExpr_CreateStruct_Entry_KeyKind() {}
+
+type SourceInfo_Extension struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+ AffectedComponents []SourceInfo_Extension_Component `protobuf:"varint,2,rep,packed,name=affected_components,json=affectedComponents,proto3,enum=cel.expr.SourceInfo_Extension_Component" json:"affected_components,omitempty"`
+ Version *SourceInfo_Extension_Version `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"`
+}
+
+func (x *SourceInfo_Extension) Reset() {
+ *x = SourceInfo_Extension{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SourceInfo_Extension) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SourceInfo_Extension) ProtoMessage() {}
+
+func (x *SourceInfo_Extension) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[13]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SourceInfo_Extension.ProtoReflect.Descriptor instead.
+func (*SourceInfo_Extension) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{3, 2}
+}
+
+func (x *SourceInfo_Extension) GetId() string {
+ if x != nil {
+ return x.Id
+ }
+ return ""
+}
+
+func (x *SourceInfo_Extension) GetAffectedComponents() []SourceInfo_Extension_Component {
+ if x != nil {
+ return x.AffectedComponents
+ }
+ return nil
+}
+
+func (x *SourceInfo_Extension) GetVersion() *SourceInfo_Extension_Version {
+ if x != nil {
+ return x.Version
+ }
+ return nil
+}
+
+type SourceInfo_Extension_Version struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Major int64 `protobuf:"varint,1,opt,name=major,proto3" json:"major,omitempty"`
+ Minor int64 `protobuf:"varint,2,opt,name=minor,proto3" json:"minor,omitempty"`
+}
+
+func (x *SourceInfo_Extension_Version) Reset() {
+ *x = SourceInfo_Extension_Version{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SourceInfo_Extension_Version) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SourceInfo_Extension_Version) ProtoMessage() {}
+
+func (x *SourceInfo_Extension_Version) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[14]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SourceInfo_Extension_Version.ProtoReflect.Descriptor instead.
+func (*SourceInfo_Extension_Version) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{3, 2, 0}
+}
+
+func (x *SourceInfo_Extension_Version) GetMajor() int64 {
+ if x != nil {
+ return x.Major
+ }
+ return 0
+}
+
+func (x *SourceInfo_Extension_Version) GetMinor() int64 {
+ if x != nil {
+ return x.Minor
+ }
+ return 0
+}
+
+var File_cel_expr_syntax_proto protoreflect.FileDescriptor
+
+var file_cel_expr_syntax_proto_rawDesc = []byte{
+ 0x0a, 0x15, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x73, 0x79, 0x6e, 0x74, 0x61,
+ 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70,
+ 0x72, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
+ 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x22, 0x67, 0x0a, 0x0a, 0x50, 0x61, 0x72, 0x73, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x12, 0x22,
+ 0x0a, 0x04, 0x65, 0x78, 0x70, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63,
+ 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x04, 0x65, 0x78,
+ 0x70, 0x72, 0x12, 0x35, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66,
+ 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78,
+ 0x70, 0x72, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0a, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0xfd, 0x0a, 0x0a, 0x04, 0x45, 0x78,
+ 0x70, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02,
+ 0x69, 0x64, 0x12, 0x33, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70,
+ 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x09, 0x63, 0x6f,
+ 0x6e, 0x73, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x35, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74,
+ 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65,
+ 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x49, 0x64, 0x65, 0x6e,
+ 0x74, 0x48, 0x00, 0x52, 0x09, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x38,
+ 0x0a, 0x0b, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45,
+ 0x78, 0x70, 0x72, 0x2e, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x73, 0x65,
+ 0x6c, 0x65, 0x63, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x32, 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c,
+ 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x65,
+ 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x61, 0x6c, 0x6c,
+ 0x48, 0x00, 0x52, 0x08, 0x63, 0x61, 0x6c, 0x6c, 0x45, 0x78, 0x70, 0x72, 0x12, 0x38, 0x0a, 0x09,
+ 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x19, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e,
+ 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x08, 0x6c, 0x69,
+ 0x73, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x3e, 0x0a, 0x0b, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74,
+ 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x65,
+ 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x72, 0x65, 0x61,
+ 0x74, 0x65, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x73, 0x74, 0x72, 0x75,
+ 0x63, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x4d, 0x0a, 0x12, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65,
+ 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x09, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78,
+ 0x70, 0x72, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
+ 0x48, 0x00, 0x52, 0x11, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f,
+ 0x6e, 0x45, 0x78, 0x70, 0x72, 0x1a, 0x1b, 0x0a, 0x05, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x12, 0x12,
+ 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x1a, 0x65, 0x0a, 0x06, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x12, 0x28, 0x0a, 0x07,
+ 0x6f, 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e,
+ 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x07, 0x6f,
+ 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x1b, 0x0a, 0x09,
+ 0x74, 0x65, 0x73, 0x74, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x08, 0x74, 0x65, 0x73, 0x74, 0x4f, 0x6e, 0x6c, 0x79, 0x1a, 0x6e, 0x0a, 0x04, 0x43, 0x61, 0x6c,
+ 0x6c, 0x12, 0x26, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70,
+ 0x72, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x66, 0x75, 0x6e,
+ 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6e,
+ 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x73, 0x18, 0x03, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45,
+ 0x78, 0x70, 0x72, 0x52, 0x04, 0x61, 0x72, 0x67, 0x73, 0x1a, 0x63, 0x0a, 0x0a, 0x43, 0x72, 0x65,
+ 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x2a, 0x0a, 0x08, 0x65, 0x6c, 0x65, 0x6d, 0x65,
+ 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e,
+ 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x65, 0x6c, 0x65, 0x6d, 0x65,
+ 0x6e, 0x74, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f,
+ 0x69, 0x6e, 0x64, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0f, 0x6f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x49, 0x6e, 0x64, 0x69, 0x63, 0x65, 0x73, 0x1a, 0xab,
+ 0x02, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x12,
+ 0x21, 0x0a, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4e, 0x61,
+ 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45,
+ 0x78, 0x70, 0x72, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74,
+ 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x1a,
+ 0xba, 0x01, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1d, 0x0a, 0x09, 0x66, 0x69, 0x65,
+ 0x6c, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08,
+ 0x66, 0x69, 0x65, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x07, 0x6d, 0x61, 0x70, 0x5f,
+ 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e,
+ 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x48, 0x00, 0x52, 0x06, 0x6d, 0x61, 0x70,
+ 0x4b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78,
+ 0x70, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28,
+ 0x08, 0x52, 0x0d, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x42, 0x0a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x1a, 0xad, 0x02, 0x0a,
+ 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x19,
+ 0x0a, 0x08, 0x69, 0x74, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x07, 0x69, 0x74, 0x65, 0x72, 0x56, 0x61, 0x72, 0x12, 0x2d, 0x0a, 0x0a, 0x69, 0x74, 0x65,
+ 0x72, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e,
+ 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x09, 0x69,
+ 0x74, 0x65, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x61, 0x63, 0x63, 0x75,
+ 0x5f, 0x76, 0x61, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x63, 0x63, 0x75,
+ 0x56, 0x61, 0x72, 0x12, 0x2b, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x75, 0x5f, 0x69, 0x6e, 0x69, 0x74,
+ 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70,
+ 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x61, 0x63, 0x63, 0x75, 0x49, 0x6e, 0x69, 0x74,
+ 0x12, 0x35, 0x0a, 0x0e, 0x6c, 0x6f, 0x6f, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69,
+ 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65,
+ 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x0d, 0x6c, 0x6f, 0x6f, 0x70, 0x43, 0x6f,
+ 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x09, 0x6c, 0x6f, 0x6f, 0x70, 0x5f,
+ 0x73, 0x74, 0x65, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c,
+ 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x6c, 0x6f, 0x6f, 0x70,
+ 0x53, 0x74, 0x65, 0x70, 0x12, 0x26, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x07,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+ 0x45, 0x78, 0x70, 0x72, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x42, 0x0b, 0x0a, 0x09,
+ 0x65, 0x78, 0x70, 0x72, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0xc1, 0x03, 0x0a, 0x08, 0x43, 0x6f,
+ 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c,
+ 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56,
+ 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0a, 0x69, 0x6e, 0x74,
+ 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x75, 0x69, 0x6e, 0x74, 0x36,
+ 0x34, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52,
+ 0x0b, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c,
+ 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01,
+ 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e,
+ 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62,
+ 0x79, 0x74, 0x65, 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x46, 0x0a, 0x0e, 0x64, 0x75, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x18, 0x01,
+ 0x48, 0x00, 0x52, 0x0d, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x12, 0x49, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
+ 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x0e, 0x74, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0f, 0x0a, 0x0d,
+ 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0xac, 0x06,
+ 0x0a, 0x0a, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x25, 0x0a, 0x0e,
+ 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x56, 0x65, 0x72, 0x73,
+ 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12,
+ 0x21, 0x0a, 0x0c, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x73, 0x18,
+ 0x03, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0b, 0x6c, 0x69, 0x6e, 0x65, 0x4f, 0x66, 0x66, 0x73, 0x65,
+ 0x74, 0x73, 0x12, 0x41, 0x0a, 0x09, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18,
+ 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72,
+ 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x50, 0x6f, 0x73, 0x69,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x70, 0x6f, 0x73, 0x69,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x45, 0x0a, 0x0b, 0x6d, 0x61, 0x63, 0x72, 0x6f, 0x5f, 0x63,
+ 0x61, 0x6c, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x65, 0x6c,
+ 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f,
+ 0x2e, 0x4d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x52, 0x0a, 0x6d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x12, 0x3e, 0x0a, 0x0a,
+ 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x1e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x53, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
+ 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x3c, 0x0a, 0x0e,
+ 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10,
+ 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79,
+ 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52,
+ 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x4d, 0x0a, 0x0f, 0x4d, 0x61,
+ 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
+ 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
+ 0x24, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e,
+ 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0xe0, 0x02, 0x0a, 0x09, 0x45, 0x78,
+ 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x59, 0x0a, 0x13, 0x61, 0x66, 0x66, 0x65, 0x63,
+ 0x74, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02,
+ 0x20, 0x03, 0x28, 0x0e, 0x32, 0x28, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+ 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e,
+ 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x52, 0x12,
+ 0x61, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e,
+ 0x74, 0x73, 0x12, 0x40, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x53,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73,
+ 0x69, 0x6f, 0x6e, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x76, 0x65, 0x72,
+ 0x73, 0x69, 0x6f, 0x6e, 0x1a, 0x35, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12,
+ 0x14, 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05,
+ 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x22, 0x6f, 0x0a, 0x09, 0x43,
+ 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x12, 0x19, 0x0a, 0x15, 0x43, 0x4f, 0x4d, 0x50,
+ 0x4f, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45,
+ 0x44, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x43, 0x4f, 0x4d, 0x50, 0x4f, 0x4e, 0x45, 0x4e, 0x54,
+ 0x5f, 0x50, 0x41, 0x52, 0x53, 0x45, 0x52, 0x10, 0x01, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4d,
+ 0x50, 0x4f, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x48, 0x45, 0x43,
+ 0x4b, 0x45, 0x52, 0x10, 0x02, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x4f, 0x4d, 0x50, 0x4f, 0x4e, 0x45,
+ 0x4e, 0x54, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x42, 0x2e, 0x0a, 0x0c,
+ 0x64, 0x65, 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x0b, 0x53, 0x79,
+ 0x6e, 0x74, 0x61, 0x78, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c,
+ 0x2e, 0x64, 0x65, 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_cel_expr_syntax_proto_rawDescOnce sync.Once
+ file_cel_expr_syntax_proto_rawDescData = file_cel_expr_syntax_proto_rawDesc
+)
+
+func file_cel_expr_syntax_proto_rawDescGZIP() []byte {
+ file_cel_expr_syntax_proto_rawDescOnce.Do(func() {
+ file_cel_expr_syntax_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_syntax_proto_rawDescData)
+ })
+ return file_cel_expr_syntax_proto_rawDescData
+}
+
+var file_cel_expr_syntax_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_cel_expr_syntax_proto_msgTypes = make([]protoimpl.MessageInfo, 15)
+var file_cel_expr_syntax_proto_goTypes = []interface{}{
+ (SourceInfo_Extension_Component)(0), // 0: cel.expr.SourceInfo.Extension.Component
+ (*ParsedExpr)(nil), // 1: cel.expr.ParsedExpr
+ (*Expr)(nil), // 2: cel.expr.Expr
+ (*Constant)(nil), // 3: cel.expr.Constant
+ (*SourceInfo)(nil), // 4: cel.expr.SourceInfo
+ (*Expr_Ident)(nil), // 5: cel.expr.Expr.Ident
+ (*Expr_Select)(nil), // 6: cel.expr.Expr.Select
+ (*Expr_Call)(nil), // 7: cel.expr.Expr.Call
+ (*Expr_CreateList)(nil), // 8: cel.expr.Expr.CreateList
+ (*Expr_CreateStruct)(nil), // 9: cel.expr.Expr.CreateStruct
+ (*Expr_Comprehension)(nil), // 10: cel.expr.Expr.Comprehension
+ (*Expr_CreateStruct_Entry)(nil), // 11: cel.expr.Expr.CreateStruct.Entry
+ nil, // 12: cel.expr.SourceInfo.PositionsEntry
+ nil, // 13: cel.expr.SourceInfo.MacroCallsEntry
+ (*SourceInfo_Extension)(nil), // 14: cel.expr.SourceInfo.Extension
+ (*SourceInfo_Extension_Version)(nil), // 15: cel.expr.SourceInfo.Extension.Version
+ (structpb.NullValue)(0), // 16: google.protobuf.NullValue
+ (*durationpb.Duration)(nil), // 17: google.protobuf.Duration
+ (*timestamppb.Timestamp)(nil), // 18: google.protobuf.Timestamp
+}
+var file_cel_expr_syntax_proto_depIdxs = []int32{
+ 2, // 0: cel.expr.ParsedExpr.expr:type_name -> cel.expr.Expr
+ 4, // 1: cel.expr.ParsedExpr.source_info:type_name -> cel.expr.SourceInfo
+ 3, // 2: cel.expr.Expr.const_expr:type_name -> cel.expr.Constant
+ 5, // 3: cel.expr.Expr.ident_expr:type_name -> cel.expr.Expr.Ident
+ 6, // 4: cel.expr.Expr.select_expr:type_name -> cel.expr.Expr.Select
+ 7, // 5: cel.expr.Expr.call_expr:type_name -> cel.expr.Expr.Call
+ 8, // 6: cel.expr.Expr.list_expr:type_name -> cel.expr.Expr.CreateList
+ 9, // 7: cel.expr.Expr.struct_expr:type_name -> cel.expr.Expr.CreateStruct
+ 10, // 8: cel.expr.Expr.comprehension_expr:type_name -> cel.expr.Expr.Comprehension
+ 16, // 9: cel.expr.Constant.null_value:type_name -> google.protobuf.NullValue
+ 17, // 10: cel.expr.Constant.duration_value:type_name -> google.protobuf.Duration
+ 18, // 11: cel.expr.Constant.timestamp_value:type_name -> google.protobuf.Timestamp
+ 12, // 12: cel.expr.SourceInfo.positions:type_name -> cel.expr.SourceInfo.PositionsEntry
+ 13, // 13: cel.expr.SourceInfo.macro_calls:type_name -> cel.expr.SourceInfo.MacroCallsEntry
+ 14, // 14: cel.expr.SourceInfo.extensions:type_name -> cel.expr.SourceInfo.Extension
+ 2, // 15: cel.expr.Expr.Select.operand:type_name -> cel.expr.Expr
+ 2, // 16: cel.expr.Expr.Call.target:type_name -> cel.expr.Expr
+ 2, // 17: cel.expr.Expr.Call.args:type_name -> cel.expr.Expr
+ 2, // 18: cel.expr.Expr.CreateList.elements:type_name -> cel.expr.Expr
+ 11, // 19: cel.expr.Expr.CreateStruct.entries:type_name -> cel.expr.Expr.CreateStruct.Entry
+ 2, // 20: cel.expr.Expr.Comprehension.iter_range:type_name -> cel.expr.Expr
+ 2, // 21: cel.expr.Expr.Comprehension.accu_init:type_name -> cel.expr.Expr
+ 2, // 22: cel.expr.Expr.Comprehension.loop_condition:type_name -> cel.expr.Expr
+ 2, // 23: cel.expr.Expr.Comprehension.loop_step:type_name -> cel.expr.Expr
+ 2, // 24: cel.expr.Expr.Comprehension.result:type_name -> cel.expr.Expr
+ 2, // 25: cel.expr.Expr.CreateStruct.Entry.map_key:type_name -> cel.expr.Expr
+ 2, // 26: cel.expr.Expr.CreateStruct.Entry.value:type_name -> cel.expr.Expr
+ 2, // 27: cel.expr.SourceInfo.MacroCallsEntry.value:type_name -> cel.expr.Expr
+ 0, // 28: cel.expr.SourceInfo.Extension.affected_components:type_name -> cel.expr.SourceInfo.Extension.Component
+ 15, // 29: cel.expr.SourceInfo.Extension.version:type_name -> cel.expr.SourceInfo.Extension.Version
+ 30, // [30:30] is the sub-list for method output_type
+ 30, // [30:30] is the sub-list for method input_type
+ 30, // [30:30] is the sub-list for extension type_name
+ 30, // [30:30] is the sub-list for extension extendee
+ 0, // [0:30] is the sub-list for field type_name
+}
+
+func init() { file_cel_expr_syntax_proto_init() }
+func file_cel_expr_syntax_proto_init() {
+ if File_cel_expr_syntax_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_cel_expr_syntax_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ParsedExpr); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Constant); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SourceInfo); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr_Ident); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr_Select); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr_Call); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr_CreateList); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr_CreateStruct); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr_Comprehension); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr_CreateStruct_Entry); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SourceInfo_Extension); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SourceInfo_Extension_Version); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[1].OneofWrappers = []interface{}{
+ (*Expr_ConstExpr)(nil),
+ (*Expr_IdentExpr)(nil),
+ (*Expr_SelectExpr)(nil),
+ (*Expr_CallExpr)(nil),
+ (*Expr_ListExpr)(nil),
+ (*Expr_StructExpr)(nil),
+ (*Expr_ComprehensionExpr)(nil),
+ }
+ file_cel_expr_syntax_proto_msgTypes[2].OneofWrappers = []interface{}{
+ (*Constant_NullValue)(nil),
+ (*Constant_BoolValue)(nil),
+ (*Constant_Int64Value)(nil),
+ (*Constant_Uint64Value)(nil),
+ (*Constant_DoubleValue)(nil),
+ (*Constant_StringValue)(nil),
+ (*Constant_BytesValue)(nil),
+ (*Constant_DurationValue)(nil),
+ (*Constant_TimestampValue)(nil),
+ }
+ file_cel_expr_syntax_proto_msgTypes[10].OneofWrappers = []interface{}{
+ (*Expr_CreateStruct_Entry_FieldKey)(nil),
+ (*Expr_CreateStruct_Entry_MapKey)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_cel_expr_syntax_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 15,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_cel_expr_syntax_proto_goTypes,
+ DependencyIndexes: file_cel_expr_syntax_proto_depIdxs,
+ EnumInfos: file_cel_expr_syntax_proto_enumTypes,
+ MessageInfos: file_cel_expr_syntax_proto_msgTypes,
+ }.Build()
+ File_cel_expr_syntax_proto = out.File
+ file_cel_expr_syntax_proto_rawDesc = nil
+ file_cel_expr_syntax_proto_goTypes = nil
+ file_cel_expr_syntax_proto_depIdxs = nil
+}
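
The generated `syntax.pb.go` above follows the standard protoc-gen-go pattern: each proto oneof becomes a set of wrapper structs plus nil-safe getters. As a minimal sketch (not a verified part of this change), here is how a consumer of the vendored `cel.dev/expr` package might build and inspect an expression; the `ExprKind` and `ConstantKind` field names are assumptions inferred from the `expr_kind`/`constant_kind` oneof declarations in the descriptor above.

```go
package example

import (
	"fmt"

	expr "cel.dev/expr"
)

// stringLiteral wraps the CEL string literal "hello" as an Expr using the
// generated oneof wrapper types, then reads it back via nil-safe getters.
func stringLiteral() {
	e := &expr.Expr{
		ExprKind: &expr.Expr_ConstExpr{
			ConstExpr: &expr.Constant{
				ConstantKind: &expr.Constant_StringValue{StringValue: "hello"},
			},
		},
	}
	fmt.Println(e.GetConstExpr().GetStringValue()) // "hello"
	fmt.Println(e.GetIdentExpr().GetName())        // "" (wrong kind; getters are nil-safe)
}
```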
diff --git a/vendor/cel.dev/expr/value.pb.go b/vendor/cel.dev/expr/value.pb.go
new file mode 100644
index 000000000..e5e29228c
--- /dev/null
+++ b/vendor/cel.dev/expr/value.pb.go
@@ -0,0 +1,653 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.28.1
+// protoc v3.21.5
+// source: cel/expr/value.proto
+
+package expr
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type Value struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to Kind:
+ //
+ // *Value_NullValue
+ // *Value_BoolValue
+ // *Value_Int64Value
+ // *Value_Uint64Value
+ // *Value_DoubleValue
+ // *Value_StringValue
+ // *Value_BytesValue
+ // *Value_EnumValue
+ // *Value_ObjectValue
+ // *Value_MapValue
+ // *Value_ListValue
+ // *Value_TypeValue
+ Kind isValue_Kind `protobuf_oneof:"kind"`
+}
+
+func (x *Value) Reset() {
+ *x = Value{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_value_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Value) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Value) ProtoMessage() {}
+
+func (x *Value) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_value_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Value.ProtoReflect.Descriptor instead.
+func (*Value) Descriptor() ([]byte, []int) {
+ return file_cel_expr_value_proto_rawDescGZIP(), []int{0}
+}
+
+func (m *Value) GetKind() isValue_Kind {
+ if m != nil {
+ return m.Kind
+ }
+ return nil
+}
+
+func (x *Value) GetNullValue() structpb.NullValue {
+ if x, ok := x.GetKind().(*Value_NullValue); ok {
+ return x.NullValue
+ }
+ return structpb.NullValue(0)
+}
+
+func (x *Value) GetBoolValue() bool {
+ if x, ok := x.GetKind().(*Value_BoolValue); ok {
+ return x.BoolValue
+ }
+ return false
+}
+
+func (x *Value) GetInt64Value() int64 {
+ if x, ok := x.GetKind().(*Value_Int64Value); ok {
+ return x.Int64Value
+ }
+ return 0
+}
+
+func (x *Value) GetUint64Value() uint64 {
+ if x, ok := x.GetKind().(*Value_Uint64Value); ok {
+ return x.Uint64Value
+ }
+ return 0
+}
+
+func (x *Value) GetDoubleValue() float64 {
+ if x, ok := x.GetKind().(*Value_DoubleValue); ok {
+ return x.DoubleValue
+ }
+ return 0
+}
+
+func (x *Value) GetStringValue() string {
+ if x, ok := x.GetKind().(*Value_StringValue); ok {
+ return x.StringValue
+ }
+ return ""
+}
+
+func (x *Value) GetBytesValue() []byte {
+ if x, ok := x.GetKind().(*Value_BytesValue); ok {
+ return x.BytesValue
+ }
+ return nil
+}
+
+func (x *Value) GetEnumValue() *EnumValue {
+ if x, ok := x.GetKind().(*Value_EnumValue); ok {
+ return x.EnumValue
+ }
+ return nil
+}
+
+func (x *Value) GetObjectValue() *anypb.Any {
+ if x, ok := x.GetKind().(*Value_ObjectValue); ok {
+ return x.ObjectValue
+ }
+ return nil
+}
+
+func (x *Value) GetMapValue() *MapValue {
+ if x, ok := x.GetKind().(*Value_MapValue); ok {
+ return x.MapValue
+ }
+ return nil
+}
+
+func (x *Value) GetListValue() *ListValue {
+ if x, ok := x.GetKind().(*Value_ListValue); ok {
+ return x.ListValue
+ }
+ return nil
+}
+
+func (x *Value) GetTypeValue() string {
+ if x, ok := x.GetKind().(*Value_TypeValue); ok {
+ return x.TypeValue
+ }
+ return ""
+}
+
+type isValue_Kind interface {
+ isValue_Kind()
+}
+
+type Value_NullValue struct {
+ NullValue structpb.NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"`
+}
+
+type Value_BoolValue struct {
+ BoolValue bool `protobuf:"varint,2,opt,name=bool_value,json=boolValue,proto3,oneof"`
+}
+
+type Value_Int64Value struct {
+ Int64Value int64 `protobuf:"varint,3,opt,name=int64_value,json=int64Value,proto3,oneof"`
+}
+
+type Value_Uint64Value struct {
+ Uint64Value uint64 `protobuf:"varint,4,opt,name=uint64_value,json=uint64Value,proto3,oneof"`
+}
+
+type Value_DoubleValue struct {
+ DoubleValue float64 `protobuf:"fixed64,5,opt,name=double_value,json=doubleValue,proto3,oneof"`
+}
+
+type Value_StringValue struct {
+ StringValue string `protobuf:"bytes,6,opt,name=string_value,json=stringValue,proto3,oneof"`
+}
+
+type Value_BytesValue struct {
+ BytesValue []byte `protobuf:"bytes,7,opt,name=bytes_value,json=bytesValue,proto3,oneof"`
+}
+
+type Value_EnumValue struct {
+ EnumValue *EnumValue `protobuf:"bytes,9,opt,name=enum_value,json=enumValue,proto3,oneof"`
+}
+
+type Value_ObjectValue struct {
+ ObjectValue *anypb.Any `protobuf:"bytes,10,opt,name=object_value,json=objectValue,proto3,oneof"`
+}
+
+type Value_MapValue struct {
+ MapValue *MapValue `protobuf:"bytes,11,opt,name=map_value,json=mapValue,proto3,oneof"`
+}
+
+type Value_ListValue struct {
+ ListValue *ListValue `protobuf:"bytes,12,opt,name=list_value,json=listValue,proto3,oneof"`
+}
+
+type Value_TypeValue struct {
+ TypeValue string `protobuf:"bytes,15,opt,name=type_value,json=typeValue,proto3,oneof"`
+}
+
+func (*Value_NullValue) isValue_Kind() {}
+
+func (*Value_BoolValue) isValue_Kind() {}
+
+func (*Value_Int64Value) isValue_Kind() {}
+
+func (*Value_Uint64Value) isValue_Kind() {}
+
+func (*Value_DoubleValue) isValue_Kind() {}
+
+func (*Value_StringValue) isValue_Kind() {}
+
+func (*Value_BytesValue) isValue_Kind() {}
+
+func (*Value_EnumValue) isValue_Kind() {}
+
+func (*Value_ObjectValue) isValue_Kind() {}
+
+func (*Value_MapValue) isValue_Kind() {}
+
+func (*Value_ListValue) isValue_Kind() {}
+
+func (*Value_TypeValue) isValue_Kind() {}
+
+type EnumValue struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+ Value int32 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *EnumValue) Reset() {
+ *x = EnumValue{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_value_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *EnumValue) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EnumValue) ProtoMessage() {}
+
+func (x *EnumValue) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_value_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use EnumValue.ProtoReflect.Descriptor instead.
+func (*EnumValue) Descriptor() ([]byte, []int) {
+ return file_cel_expr_value_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *EnumValue) GetType() string {
+ if x != nil {
+ return x.Type
+ }
+ return ""
+}
+
+func (x *EnumValue) GetValue() int32 {
+ if x != nil {
+ return x.Value
+ }
+ return 0
+}
+
+type ListValue struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+}
+
+func (x *ListValue) Reset() {
+ *x = ListValue{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_value_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ListValue) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListValue) ProtoMessage() {}
+
+func (x *ListValue) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_value_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListValue.ProtoReflect.Descriptor instead.
+func (*ListValue) Descriptor() ([]byte, []int) {
+ return file_cel_expr_value_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ListValue) GetValues() []*Value {
+ if x != nil {
+ return x.Values
+ }
+ return nil
+}
+
+type MapValue struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Entries []*MapValue_Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"`
+}
+
+func (x *MapValue) Reset() {
+ *x = MapValue{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_value_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MapValue) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MapValue) ProtoMessage() {}
+
+func (x *MapValue) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_value_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MapValue.ProtoReflect.Descriptor instead.
+func (*MapValue) Descriptor() ([]byte, []int) {
+ return file_cel_expr_value_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *MapValue) GetEntries() []*MapValue_Entry {
+ if x != nil {
+ return x.Entries
+ }
+ return nil
+}
+
+type MapValue_Entry struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Key *Value `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+ Value *Value `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *MapValue_Entry) Reset() {
+ *x = MapValue_Entry{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_value_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MapValue_Entry) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MapValue_Entry) ProtoMessage() {}
+
+func (x *MapValue_Entry) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_value_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MapValue_Entry.ProtoReflect.Descriptor instead.
+func (*MapValue_Entry) Descriptor() ([]byte, []int) {
+ return file_cel_expr_value_proto_rawDescGZIP(), []int{3, 0}
+}
+
+func (x *MapValue_Entry) GetKey() *Value {
+ if x != nil {
+ return x.Key
+ }
+ return nil
+}
+
+func (x *MapValue_Entry) GetValue() *Value {
+ if x != nil {
+ return x.Value
+ }
+ return nil
+}
+
+var File_cel_expr_value_proto protoreflect.FileDescriptor
+
+var file_cel_expr_value_proto_rawDesc = []byte{
+ 0x0a, 0x14, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72,
+ 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72,
+ 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x9d, 0x04, 0x0a, 0x05, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65,
+ 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0a, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x56,
+ 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x0b, 0x75, 0x69,
+ 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x64, 0x6f, 0x75,
+ 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, 0x48,
+ 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23,
+ 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06,
+ 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62, 0x79, 0x74, 0x65,
+ 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x65, 0x6c,
+ 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48,
+ 0x00, 0x52, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x39, 0x0a, 0x0c,
+ 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0a, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x6f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x31, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c,
+ 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x4d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00,
+ 0x52, 0x08, 0x6d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x34, 0x0a, 0x0a, 0x6c, 0x69,
+ 0x73, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13,
+ 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65,
+ 0x12, 0x1f, 0x0a, 0x0a, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0f,
+ 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x09, 0x74, 0x79, 0x70, 0x65, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0x35, 0x0a, 0x09, 0x45, 0x6e, 0x75,
+ 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x22, 0x34, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a,
+ 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e,
+ 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0x91, 0x01, 0x0a, 0x08, 0x4d, 0x61, 0x70, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+ 0x4d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07,
+ 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x1a, 0x51, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x12, 0x21, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e,
+ 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x03,
+ 0x6b, 0x65, 0x79, 0x12, 0x25, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x2d, 0x0a, 0x0c, 0x64, 0x65,
+ 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x0a, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, 0x65,
+ 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x33,
+}
+
+var (
+ file_cel_expr_value_proto_rawDescOnce sync.Once
+ file_cel_expr_value_proto_rawDescData = file_cel_expr_value_proto_rawDesc
+)
+
+func file_cel_expr_value_proto_rawDescGZIP() []byte {
+ file_cel_expr_value_proto_rawDescOnce.Do(func() {
+ file_cel_expr_value_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_value_proto_rawDescData)
+ })
+ return file_cel_expr_value_proto_rawDescData
+}
+
+var file_cel_expr_value_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
+var file_cel_expr_value_proto_goTypes = []interface{}{
+ (*Value)(nil), // 0: cel.expr.Value
+ (*EnumValue)(nil), // 1: cel.expr.EnumValue
+ (*ListValue)(nil), // 2: cel.expr.ListValue
+ (*MapValue)(nil), // 3: cel.expr.MapValue
+ (*MapValue_Entry)(nil), // 4: cel.expr.MapValue.Entry
+ (structpb.NullValue)(0), // 5: google.protobuf.NullValue
+ (*anypb.Any)(nil), // 6: google.protobuf.Any
+}
+var file_cel_expr_value_proto_depIdxs = []int32{
+ 5, // 0: cel.expr.Value.null_value:type_name -> google.protobuf.NullValue
+ 1, // 1: cel.expr.Value.enum_value:type_name -> cel.expr.EnumValue
+ 6, // 2: cel.expr.Value.object_value:type_name -> google.protobuf.Any
+ 3, // 3: cel.expr.Value.map_value:type_name -> cel.expr.MapValue
+ 2, // 4: cel.expr.Value.list_value:type_name -> cel.expr.ListValue
+ 0, // 5: cel.expr.ListValue.values:type_name -> cel.expr.Value
+ 4, // 6: cel.expr.MapValue.entries:type_name -> cel.expr.MapValue.Entry
+ 0, // 7: cel.expr.MapValue.Entry.key:type_name -> cel.expr.Value
+ 0, // 8: cel.expr.MapValue.Entry.value:type_name -> cel.expr.Value
+ 9, // [9:9] is the sub-list for method output_type
+ 9, // [9:9] is the sub-list for method input_type
+ 9, // [9:9] is the sub-list for extension type_name
+ 9, // [9:9] is the sub-list for extension extendee
+ 0, // [0:9] is the sub-list for field type_name
+}
+
+func init() { file_cel_expr_value_proto_init() }
+func file_cel_expr_value_proto_init() {
+ if File_cel_expr_value_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_cel_expr_value_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Value); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_value_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*EnumValue); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_value_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ListValue); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_value_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MapValue); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_value_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MapValue_Entry); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_cel_expr_value_proto_msgTypes[0].OneofWrappers = []interface{}{
+ (*Value_NullValue)(nil),
+ (*Value_BoolValue)(nil),
+ (*Value_Int64Value)(nil),
+ (*Value_Uint64Value)(nil),
+ (*Value_DoubleValue)(nil),
+ (*Value_StringValue)(nil),
+ (*Value_BytesValue)(nil),
+ (*Value_EnumValue)(nil),
+ (*Value_ObjectValue)(nil),
+ (*Value_MapValue)(nil),
+ (*Value_ListValue)(nil),
+ (*Value_TypeValue)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_cel_expr_value_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 5,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_cel_expr_value_proto_goTypes,
+ DependencyIndexes: file_cel_expr_value_proto_depIdxs,
+ MessageInfos: file_cel_expr_value_proto_msgTypes,
+ }.Build()
+ File_cel_expr_value_proto = out.File
+ file_cel_expr_value_proto_rawDesc = nil
+ file_cel_expr_value_proto_goTypes = nil
+ file_cel_expr_value_proto_depIdxs = nil
+}
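
`value.pb.go` uses the same oneof pattern, and here every identifier needed appears verbatim above (`Value.Kind`, the `Value_*` wrappers, `MapValue.Entries`, and `MapValue_Entry.Key`/`.Value`). A short usage sketch, not part of the diff:

```go
package example

import (
	"fmt"

	expr "cel.dev/expr"
)

// mapLiteral builds the cel.expr value {"k": 1} with the generated oneof
// wrappers, then walks it back out through the nil-safe getters.
func mapLiteral() {
	v := &expr.Value{
		Kind: &expr.Value_MapValue{
			MapValue: &expr.MapValue{
				Entries: []*expr.MapValue_Entry{{
					Key:   &expr.Value{Kind: &expr.Value_StringValue{StringValue: "k"}},
					Value: &expr.Value{Kind: &expr.Value_Int64Value{Int64Value: 1}},
				}},
			},
		},
	}
	for _, e := range v.GetMapValue().GetEntries() {
		fmt.Printf("%s = %d\n", e.GetKey().GetStringValue(), e.GetValue().GetInt64Value())
	}
}
```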
diff --git a/vendor/github.com/antlr4-go/antlr/v4/.gitignore b/vendor/github.com/antlr4-go/antlr/v4/.gitignore
new file mode 100644
index 000000000..38ea34ff5
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/.gitignore
@@ -0,0 +1,18 @@
+### Go template
+
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, built with `go test -c`
+*.test
+
+
+# Go workspace file
+go.work
+
+# No Goland stuff in this repo
+.idea
diff --git a/vendor/github.com/antlr4-go/antlr/v4/LICENSE b/vendor/github.com/antlr4-go/antlr/v4/LICENSE
new file mode 100644
index 000000000..a22292eb5
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2012-2023 The ANTLR Project. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+
+3. Neither name of copyright holders nor the names of its contributors
+may be used to endorse or promote products derived from this software
+without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR
+CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/antlr4-go/antlr/v4/README.md b/vendor/github.com/antlr4-go/antlr/v4/README.md
new file mode 100644
index 000000000..03e5b83eb
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/README.md
@@ -0,0 +1,54 @@
+# ANTLR4 Go Runtime Module Repo
+
+IMPORTANT: Please submit PRs via a clone of the https://github.com/antlr/antlr4 repo, and not here.
+
+ - Do not submit PRs or any change requests to this repo
+ - This repo is read-only and is updated by the ANTLR team only to create new releases of the Go Runtime for ANTLR
+ - This repo contains the Go runtime that your generated projects should import
+
+## Introduction
+
+This repo contains the official modules for the Go Runtime for ANTLR. It is a copy of the runtime maintained
+at https://github.com/antlr/antlr4/tree/master/runtime/Go/antlr and is updated automatically by the ANTLR team, solely to create
+official Go runtime releases. No development work is carried out in this repo, and PRs are not accepted here.
+
+The dev branch of this repo is kept in sync with the dev branch of the main ANTLR repo and is updated periodically.
+
+### Why?
+
+The `go get` command is unable to retrieve the Go runtime when it is embedded so
+deeply in the main repo. A `go get` against the `antlr/antlr4` repo, while retrieving the correct source code for the runtime,
+does not correctly resolve tags; it creates a reference in your `go.mod` file that is unclear, does not upgrade smoothly, and
+causes confusion.
+
+For instance, the current Go runtime release, which is tagged with v4.13.0 in `antlr/antlr4`, is retrieved by `go get` as:
+
+```
+require (
+ github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230219212500-1f9a474cc2dc
+)
+```
+
+Where you would expect to see:
+
+```
+require (
+ github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.13.0
+)
+```
+
+The decision was taken to create a separate repo, under a separate org, to hold the official Go runtime for ANTLR,
+from which users can expect `go get` to behave as expected.
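+
+For example, a module-aware fetch against this repo resolves to a clean, tagged version:
+
+```sh
+go get github.com/antlr4-go/antlr/v4
+```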
+
+
+# Documentation
+Please read the official documentation at: https://github.com/antlr/antlr4/blob/master/doc/index.md for tips on
+migrating existing projects to use the new module location and for information on how to use the Go runtime in
+general.
diff --git a/vendor/github.com/antlr4-go/antlr/v4/antlrdoc.go b/vendor/github.com/antlr4-go/antlr/v4/antlrdoc.go
new file mode 100644
index 000000000..3bb4fd7c4
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/antlrdoc.go
@@ -0,0 +1,102 @@
+/*
+Package antlr implements the Go version of the ANTLR 4 runtime.
+
+# The ANTLR Tool
+
+ANTLR (ANother Tool for Language Recognition) is a powerful parser generator for reading, processing, executing,
+or translating structured text or binary files. It's widely used to build languages, tools, and frameworks.
+From a grammar, ANTLR generates a parser that can build parse trees and also generates a listener interface
+(or visitor) that makes it easy to respond to the recognition of phrases of interest.
+
+# Go Runtime
+
+At version 4.11.x and prior, the Go runtime was not properly versioned for go modules. After this point, the runtime
+source code to be imported was held in the `runtime/Go/antlr/v4` directory, and the go.mod file was updated to reflect the version of
+ANTLR4 that it is compatible with (i.e. it uses the /v4 path).
+
+However, this was found to be problematic, as it meant that with the runtime embedded so far underneath the root
+of the repo, the `go get` and related commands could not properly resolve the location of the go runtime source code.
+This meant that the reference to the runtime in your `go.mod` file would refer to the correct source code, but would not
+list the release tag such as @4.12.0 - this was confusing, to say the least.
+
+As of 4.12.1, the runtime is available as a Go module in its own repo and can be imported as `github.com/antlr4-go/antlr/v4`
+(the `go get` command should also be used with this path). See the main documentation for the ANTLR4 project for more information,
+which is available at [ANTLR docs]. The documentation for using the Go runtime is available at [Go runtime docs].
+
+This means that if you are using the source code without modules, you should also use the source code in the [new repo],
+though we highly recommend that you use Go modules, as they are now idiomatic for Go.
+
+I am aware that this change will prove Hyrum's Law, but am prepared to live with it for the common good.
+
+Go runtime author: [Jim Idle] jimi@idle.ws
+
+# Code Generation
+
+ANTLR supports the generation of code in a number of [target languages], and the generated code is supported by a
+runtime library, written specifically to support the generated code in the target language. This library is the
+runtime for the Go target.
+
+To generate code for the Go target, it is generally recommended to place the source grammar files in a package of
+their own and to use the `.sh` script method of generating code via the go generate directive. In that same directory
+it is usual, though not required, to place the antlr tool that should be used to generate the code. That does mean
+that the antlr tool JAR file will be checked in to your source code control, so you are, of course, free to use any other
+way of specifying the version of the ANTLR tool, such as an alias in `.zshrc` or equivalent, a profile in
+your IDE, or configuration in your CI system. Checking in the jar does, however, make it easy to reproduce the build as
+it was at any point in its history.
+
+Here is a general/recommended template for an ANTLR based recognizer in Go:
+
+ .
+ ├── parser
+ │ ├── mygrammar.g4
+ │ ├── antlr-4.12.1-complete.jar
+ │ ├── generate.go
+ │ └── generate.sh
+ ├── parsing - generated code goes here
+ │ └── error_listeners.go
+ ├── go.mod
+ ├── go.sum
+ ├── main.go
+ └── main_test.go
+
+Make sure that the package statement in your grammar file(s) reflects the go package the generated code will exist in.
+
+The generate.go file then looks like this:
+
+ package parser
+
+ //go:generate ./generate.sh
+
+And the generate.sh file will look similar to this:
+
+ #!/bin/sh
+
+ alias antlr4='java -Xmx500M -cp "./antlr4-4.12.1-complete.jar:$CLASSPATH" org.antlr.v4.Tool'
+ antlr4 -Dlanguage=Go -no-visitor -package parsing *.g4
+
+depending on whether you want visitors or listeners or any other ANTLR options. Note that another option here
+is to generate the code into a package and location of your own choosing.
+
+From the command line at the root of your source package (the location of go.mod) you can then simply issue the command:
+
+ go generate ./...
+
+Which will generate the code for the parser, and place it in the parsing package. You can then use the generated code
+by importing the parsing package.
+
+There are no hard and fast rules on this. It is just a recommendation. You can generate the code in any way and to anywhere you like.
+
+# Copyright Notice
+
+Copyright (c) 2012-2023 The ANTLR Project. All rights reserved.
+
+Use of this file is governed by the BSD 3-clause license, which can be found in the [LICENSE.txt] file in the project root.
+
+[target languages]: https://github.com/antlr/antlr4/tree/master/runtime
+[LICENSE.txt]: https://github.com/antlr/antlr4/blob/master/LICENSE.txt
+[ANTLR docs]: https://github.com/antlr/antlr4/blob/master/doc/index.md
+[new repo]: https://github.com/antlr4-go/antlr
+[Jim Idle]: https://github.com/jimidle
+[Go runtime docs]: https://github.com/antlr/antlr4/blob/master/doc/go-target.md
+*/
+package antlr
diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn.go b/vendor/github.com/antlr4-go/antlr/v4/atn.go
new file mode 100644
index 000000000..cdeefed24
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/atn.go
@@ -0,0 +1,179 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import "sync"
+
+// ATNInvalidAltNumber is used to represent an ALT number that has yet to be calculated or
+// which is invalid for a particular struct such as [*antlr.BaseRuleContext]
+var ATNInvalidAltNumber int
+
+// ATN represents an “[Augmented Transition Network]”, though in ANTLR the term generally stands for
+// “Augmented Recursive Transition Network”; there are also some descriptions of “[Recursive Transition Network]”
+// in existence.
+//
+// ATNs represent the main networks in the system and are serialized by the code generator and support [ALL(*)].
+//
+// [Augmented Transition Network]: https://en.wikipedia.org/wiki/Augmented_transition_network
+// [ALL(*)]: https://www.antlr.org/papers/allstar-techreport.pdf
+// [Recursive Transition Network]: https://en.wikipedia.org/wiki/Recursive_transition_network
+type ATN struct {
+
+	// DecisionToState is the decision points for all rules, sub-rules, optional
+	// blocks, ()+, ()*, etc. Each rule/sub-rule is a decision point, and we must track them so we
+	// can go back later and build DFA predictors for them.
+ DecisionToState []DecisionState
+
+ // grammarType is the ATN type and is used for deserializing ATNs from strings.
+ grammarType int
+
+ // lexerActions is referenced by action transitions in the ATN for lexer ATNs.
+ lexerActions []LexerAction
+
+ // maxTokenType is the maximum value for any symbol recognized by a transition in the ATN.
+ maxTokenType int
+
+ modeNameToStartState map[string]*TokensStartState
+
+ modeToStartState []*TokensStartState
+
+ // ruleToStartState maps from rule index to starting state number.
+ ruleToStartState []*RuleStartState
+
+ // ruleToStopState maps from rule index to stop state number.
+ ruleToStopState []*RuleStopState
+
+ // ruleToTokenType maps the rule index to the resulting token type for lexer
+ // ATNs. For parser ATNs, it maps the rule index to the generated bypass token
+ // type if ATNDeserializationOptions.isGenerateRuleBypassTransitions was
+ // specified, and otherwise is nil.
+ ruleToTokenType []int
+
+	// states is a list of all states in the ATN, ordered by state number.
+	states []ATNState
+
+ mu sync.Mutex
+ stateMu sync.RWMutex
+ edgeMu sync.RWMutex
+}
+
+// NewATN returns a new ATN struct representing the given grammarType and is used
+// for runtime deserialization of ATNs from the code generated by the ANTLR tool
+func NewATN(grammarType int, maxTokenType int) *ATN {
+ return &ATN{
+ grammarType: grammarType,
+ maxTokenType: maxTokenType,
+ modeNameToStartState: make(map[string]*TokensStartState),
+ }
+}
+
+// NextTokensInContext computes and returns the set of valid tokens that can occur starting
+// in state s. If ctx is nil, the set of tokens will not include what can follow
+// the rule surrounding s. In other words, the set will be restricted to tokens
+// reachable staying within the rule of s.
+func (a *ATN) NextTokensInContext(s ATNState, ctx RuleContext) *IntervalSet {
+ return NewLL1Analyzer(a).Look(s, nil, ctx)
+}
+
+// NextTokensNoContext computes and returns the set of valid tokens that can occur starting
+// in state s while staying in the same rule. [antlr.Token.EPSILON] is in the set if we reach the end of
+// the rule.
+func (a *ATN) NextTokensNoContext(s ATNState) *IntervalSet {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+ iset := s.GetNextTokenWithinRule()
+ if iset == nil {
+ iset = a.NextTokensInContext(s, nil)
+ iset.readOnly = true
+ s.SetNextTokenWithinRule(iset)
+ }
+ return iset
+}
+
+// NextTokens computes and returns the set of valid tokens starting in state s, by
+// calling either [NextTokensNoContext] (ctx == nil) or [NextTokensInContext] (ctx != nil).
+func (a *ATN) NextTokens(s ATNState, ctx RuleContext) *IntervalSet {
+ if ctx == nil {
+ return a.NextTokensNoContext(s)
+ }
+
+ return a.NextTokensInContext(s, ctx)
+}
+
+func (a *ATN) addState(state ATNState) {
+ if state != nil {
+ state.SetATN(a)
+ state.SetStateNumber(len(a.states))
+ }
+
+ a.states = append(a.states, state)
+}
+
+func (a *ATN) removeState(state ATNState) {
+ a.states[state.GetStateNumber()] = nil // Just free the memory; don't shift states in the slice
+}
+
+func (a *ATN) defineDecisionState(s DecisionState) int {
+ a.DecisionToState = append(a.DecisionToState, s)
+ s.setDecision(len(a.DecisionToState) - 1)
+
+ return s.getDecision()
+}
+
+func (a *ATN) getDecisionState(decision int) DecisionState {
+ if len(a.DecisionToState) == 0 {
+ return nil
+ }
+
+ return a.DecisionToState[decision]
+}
+
+// getExpectedTokens computes the set of input symbols which could follow ATN
+// state number stateNumber in the specified full parse context ctx and returns
+// the set of potentially valid input symbols which could follow the specified
+// state in the specified context. This method considers the complete parser
+// context, but does not evaluate semantic predicates (i.e. all predicates
+// encountered during the calculation are assumed true). If a path in the ATN
+// exists from the starting state to the RuleStopState of the outermost context
+// without matching any symbols, Token.EOF is added to the returned set.
+//
+// A nil ctx defaults to ParserRuleContext.EMPTY.
+//
+// It panics if the ATN does not contain state stateNumber.
+func (a *ATN) getExpectedTokens(stateNumber int, ctx RuleContext) *IntervalSet {
+ if stateNumber < 0 || stateNumber >= len(a.states) {
+ panic("Invalid state number.")
+ }
+
+ s := a.states[stateNumber]
+ following := a.NextTokens(s, nil)
+
+ if !following.contains(TokenEpsilon) {
+ return following
+ }
+
+ expected := NewIntervalSet()
+
+ expected.addSet(following)
+ expected.removeOne(TokenEpsilon)
+
+ for ctx != nil && ctx.GetInvokingState() >= 0 && following.contains(TokenEpsilon) {
+ invokingState := a.states[ctx.GetInvokingState()]
+ rt := invokingState.GetTransitions()[0]
+
+ following = a.NextTokens(rt.(*RuleTransition).followState, nil)
+ expected.addSet(following)
+ expected.removeOne(TokenEpsilon)
+ ctx = ctx.GetParent().(RuleContext)
+ }
+
+ if following.contains(TokenEpsilon) {
+ expected.addOne(TokenEOF)
+ }
+
+ return expected
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn_config.go b/vendor/github.com/antlr4-go/antlr/v4/atn_config.go
new file mode 100644
index 000000000..a83f25d34
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/atn_config.go
@@ -0,0 +1,335 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+)
+
+const (
+ lexerConfig = iota // Indicates that this ATNConfig is for a lexer
+ parserConfig // Indicates that this ATNConfig is for a parser
+)
+
+// ATNConfig is a tuple: (ATN state, predicted alt, syntactic, semantic
+// context). The syntactic context is a graph-structured stack node whose
+// path(s) to the root is the rule invocation(s) chain used to arrive in the
+// state. The semantic context is the tree of semantic predicates encountered
+// before reaching an ATN state.
+type ATNConfig struct {
+ precedenceFilterSuppressed bool
+ state ATNState
+ alt int
+ context *PredictionContext
+ semanticContext SemanticContext
+ reachesIntoOuterContext int
+ cType int // lexerConfig or parserConfig
+ lexerActionExecutor *LexerActionExecutor
+ passedThroughNonGreedyDecision bool
+}
+
+// NewATNConfig6 creates a new ATNConfig instance given a state, alt and context only
+func NewATNConfig6(state ATNState, alt int, context *PredictionContext) *ATNConfig {
+ return NewATNConfig5(state, alt, context, SemanticContextNone)
+}
+
+// NewATNConfig5 creates a new ATNConfig instance given a state, alt, context and semantic context
+func NewATNConfig5(state ATNState, alt int, context *PredictionContext, semanticContext SemanticContext) *ATNConfig {
+ if semanticContext == nil {
+ panic("semanticContext cannot be nil") // TODO: Necessary?
+ }
+
+ pac := &ATNConfig{}
+ pac.state = state
+ pac.alt = alt
+ pac.context = context
+ pac.semanticContext = semanticContext
+ pac.cType = parserConfig
+ return pac
+}
+
+// NewATNConfig4 creates a new ATNConfig instance given an existing config, and a state only
+func NewATNConfig4(c *ATNConfig, state ATNState) *ATNConfig {
+ return NewATNConfig(c, state, c.GetContext(), c.GetSemanticContext())
+}
+
+// NewATNConfig3 creates a new ATNConfig instance given an existing config, a state and a semantic context
+func NewATNConfig3(c *ATNConfig, state ATNState, semanticContext SemanticContext) *ATNConfig {
+ return NewATNConfig(c, state, c.GetContext(), semanticContext)
+}
+
+// NewATNConfig2 creates a new ATNConfig instance given an existing config, and a context only
+func NewATNConfig2(c *ATNConfig, semanticContext SemanticContext) *ATNConfig {
+ return NewATNConfig(c, c.GetState(), c.GetContext(), semanticContext)
+}
+
+// NewATNConfig1 creates a new ATNConfig instance given an existing config, a state, and a context only
+func NewATNConfig1(c *ATNConfig, state ATNState, context *PredictionContext) *ATNConfig {
+ return NewATNConfig(c, state, context, c.GetSemanticContext())
+}
+
+// NewATNConfig creates a new ATNConfig instance given an existing config, a state, a context, and a
+// semantic context; the other 'constructors' are just wrappers around this one.
+func NewATNConfig(c *ATNConfig, state ATNState, context *PredictionContext, semanticContext SemanticContext) *ATNConfig {
+ if semanticContext == nil {
+ panic("semanticContext cannot be nil") // TODO: Remove this - probably put here for some bug that is now fixed
+ }
+ b := &ATNConfig{}
+ b.InitATNConfig(c, state, c.GetAlt(), context, semanticContext)
+ b.cType = parserConfig
+ return b
+}
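+
+// A sketch of how the numbered wrappers relate (editorial addition; state,
+// state2, ctx, and semCtx are hypothetical values):
+//
+//	base := NewATNConfig6(state, 1, ctx)         // state, alt, and context
+//	moved := NewATNConfig4(base, state2)         // same alt, context, and semctx
+//	gated := NewATNConfig3(base, state2, semCtx) // overrides the semantic context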
+
+// InitATNConfig initializes this config with the given state, alt, context, and
+// semantic context, copying the remaining derived fields from c.
+func (a *ATNConfig) InitATNConfig(c *ATNConfig, state ATNState, alt int, context *PredictionContext, semanticContext SemanticContext) {
+ a.state = state
+ a.alt = alt
+ a.context = context
+ a.semanticContext = semanticContext
+ a.reachesIntoOuterContext = c.GetReachesIntoOuterContext()
+ a.precedenceFilterSuppressed = c.getPrecedenceFilterSuppressed()
+}
+
+func (a *ATNConfig) getPrecedenceFilterSuppressed() bool {
+ return a.precedenceFilterSuppressed
+}
+
+func (a *ATNConfig) setPrecedenceFilterSuppressed(v bool) {
+ a.precedenceFilterSuppressed = v
+}
+
+// GetState returns the ATN state associated with this configuration
+func (a *ATNConfig) GetState() ATNState {
+ return a.state
+}
+
+// GetAlt returns the alternative associated with this configuration
+func (a *ATNConfig) GetAlt() int {
+ return a.alt
+}
+
+// SetContext sets the rule invocation stack associated with this configuration
+func (a *ATNConfig) SetContext(v *PredictionContext) {
+ a.context = v
+}
+
+// GetContext returns the rule invocation stack associated with this configuration
+func (a *ATNConfig) GetContext() *PredictionContext {
+ return a.context
+}
+
+// GetSemanticContext returns the semantic context associated with this configuration
+func (a *ATNConfig) GetSemanticContext() SemanticContext {
+ return a.semanticContext
+}
+
+// GetReachesIntoOuterContext returns the count of references to an outer context from this configuration
+func (a *ATNConfig) GetReachesIntoOuterContext() int {
+ return a.reachesIntoOuterContext
+}
+
+// SetReachesIntoOuterContext sets the count of references to an outer context from this configuration
+func (a *ATNConfig) SetReachesIntoOuterContext(v int) {
+ a.reachesIntoOuterContext = v
+}
+
+// Equals is the default comparison function for an ATNConfig when no specialist implementation is required
+// for a collection.
+//
+// An ATN configuration is equal to another if both have the same state, they
+// predict the same alternative, and syntactic/semantic contexts are the same.
+func (a *ATNConfig) Equals(o Collectable[*ATNConfig]) bool {
+ switch a.cType {
+ case lexerConfig:
+ return a.LEquals(o)
+ case parserConfig:
+ return a.PEquals(o)
+ default:
+ panic("Invalid ATNConfig type")
+ }
+}
+
+// PEquals is the default comparison function for a Parser ATNConfig when no specialist implementation is required
+// for a collection.
+//
+// An ATN configuration is equal to another if both have the same state, they
+// predict the same alternative, and syntactic/semantic contexts are the same.
+func (a *ATNConfig) PEquals(o Collectable[*ATNConfig]) bool {
+ var other, ok = o.(*ATNConfig)
+
+ if !ok {
+ return false
+ }
+ if a == other {
+ return true
+ } else if other == nil {
+ return false
+ }
+
+ var equal bool
+
+ if a.context == nil {
+ equal = other.context == nil
+ } else {
+ equal = a.context.Equals(other.context)
+ }
+
+ var (
+ nums = a.state.GetStateNumber() == other.state.GetStateNumber()
+ alts = a.alt == other.alt
+ cons = a.semanticContext.Equals(other.semanticContext)
+ sups = a.precedenceFilterSuppressed == other.precedenceFilterSuppressed
+ )
+
+ return nums && alts && cons && sups && equal
+}
+
+// Hash is the default hash function for an ATNConfig when no specialist hash function is
+// required for a collection; it dispatches to LHash or PHash according to the config type.
+func (a *ATNConfig) Hash() int {
+ switch a.cType {
+ case lexerConfig:
+ return a.LHash()
+ case parserConfig:
+ return a.PHash()
+ default:
+ panic("Invalid ATNConfig type")
+ }
+}
+
+// PHash is the default hash function for a parser ATNConfig, when no specialist hash function
+// is required for a collection
+func (a *ATNConfig) PHash() int {
+ var c int
+ if a.context != nil {
+ c = a.context.Hash()
+ }
+
+ h := murmurInit(7)
+ h = murmurUpdate(h, a.state.GetStateNumber())
+ h = murmurUpdate(h, a.alt)
+ h = murmurUpdate(h, c)
+ h = murmurUpdate(h, a.semanticContext.Hash())
+ return murmurFinish(h, 4)
+}
+
+// String returns a string representation of the ATNConfig, usually used for debugging purposes
+func (a *ATNConfig) String() string {
+ var s1, s2, s3 string
+
+ if a.context != nil {
+ s1 = ",[" + fmt.Sprint(a.context) + "]"
+ }
+
+ if a.semanticContext != SemanticContextNone {
+ s2 = "," + fmt.Sprint(a.semanticContext)
+ }
+
+ if a.reachesIntoOuterContext > 0 {
+ s3 = ",up=" + fmt.Sprint(a.reachesIntoOuterContext)
+ }
+
+ return fmt.Sprintf("(%v,%v%v%v%v)", a.state, a.alt, s1, s2, s3)
+}
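+
+// For example (editorial note), a parser configuration in state 12 predicting
+// alternative 2, with an empty stack and one outer-context reference, renders
+// roughly as:
+//
+//	(12,2,[$],up=1)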
+
+// NewLexerATNConfig6 creates a new lexer ATNConfig instance given a state, alt, and context only
+func NewLexerATNConfig6(state ATNState, alt int, context *PredictionContext) *ATNConfig {
+ lac := &ATNConfig{}
+ lac.state = state
+ lac.alt = alt
+ lac.context = context
+ lac.semanticContext = SemanticContextNone
+ lac.cType = lexerConfig
+ return lac
+}
+
+// NewLexerATNConfig4 creates a new lexer ATNConfig instance given an existing config and a state only
+func NewLexerATNConfig4(c *ATNConfig, state ATNState) *ATNConfig {
+ lac := &ATNConfig{}
+ lac.lexerActionExecutor = c.lexerActionExecutor
+ lac.passedThroughNonGreedyDecision = checkNonGreedyDecision(c, state)
+ lac.InitATNConfig(c, state, c.GetAlt(), c.GetContext(), c.GetSemanticContext())
+ lac.cType = lexerConfig
+ return lac
+}
+
+// NewLexerATNConfig3 creates a new lexer ATNConfig instance given an existing config, a state, and a
+// lexer action executor
+func NewLexerATNConfig3(c *ATNConfig, state ATNState, lexerActionExecutor *LexerActionExecutor) *ATNConfig {
+ lac := &ATNConfig{}
+ lac.lexerActionExecutor = lexerActionExecutor
+ lac.passedThroughNonGreedyDecision = checkNonGreedyDecision(c, state)
+ lac.InitATNConfig(c, state, c.GetAlt(), c.GetContext(), c.GetSemanticContext())
+ lac.cType = lexerConfig
+ return lac
+}
+
+// NewLexerATNConfig2 creates a new lexer ATNConfig instance given an existing config, a state, and a context only
+func NewLexerATNConfig2(c *ATNConfig, state ATNState, context *PredictionContext) *ATNConfig {
+ lac := &ATNConfig{}
+ lac.lexerActionExecutor = c.lexerActionExecutor
+ lac.passedThroughNonGreedyDecision = checkNonGreedyDecision(c, state)
+ lac.InitATNConfig(c, state, c.GetAlt(), context, c.GetSemanticContext())
+ lac.cType = lexerConfig
+ return lac
+}
+
+//goland:noinspection GoUnusedExportedFunction
+// NewLexerATNConfig1 creates a new lexer ATNConfig instance given a state, alt, and context only
+func NewLexerATNConfig1(state ATNState, alt int, context *PredictionContext) *ATNConfig {
+ lac := &ATNConfig{}
+ lac.state = state
+ lac.alt = alt
+ lac.context = context
+ lac.semanticContext = SemanticContextNone
+ lac.cType = lexerConfig
+ return lac
+}
+
+// LHash is the default hash function for Lexer ATNConfig objects, it can be used directly or via
+// the default comparator [ObjEqComparator].
+func (a *ATNConfig) LHash() int {
+ var f int
+ if a.passedThroughNonGreedyDecision {
+ f = 1
+ } else {
+ f = 0
+ }
+ h := murmurInit(7)
+ h = murmurUpdate(h, a.state.GetStateNumber())
+ h = murmurUpdate(h, a.alt)
+ h = murmurUpdate(h, a.context.Hash())
+ h = murmurUpdate(h, a.semanticContext.Hash())
+ h = murmurUpdate(h, f)
+ h = murmurUpdate(h, a.lexerActionExecutor.Hash())
+ h = murmurFinish(h, 6)
+ return h
+}
+
+// LEquals is the default comparison function for Lexer ATNConfig objects, it can be used directly or via
+// the default comparator [ObjEqComparator].
+func (a *ATNConfig) LEquals(other Collectable[*ATNConfig]) bool {
+ var otherT, ok = other.(*ATNConfig)
+ if !ok {
+ return false
+ } else if a == otherT {
+ return true
+ } else if a.passedThroughNonGreedyDecision != otherT.passedThroughNonGreedyDecision {
+ return false
+ }
+
+ switch {
+ case a.lexerActionExecutor == nil && otherT.lexerActionExecutor == nil:
+ return true
+ case a.lexerActionExecutor != nil && otherT.lexerActionExecutor != nil:
+ if !a.lexerActionExecutor.Equals(otherT.lexerActionExecutor) {
+ return false
+ }
+ default:
+ return false // One but not both, are nil
+ }
+
+ return a.PEquals(otherT)
+}
+
+func checkNonGreedyDecision(source *ATNConfig, target ATNState) bool {
+ var ds, ok = target.(DecisionState)
+
+ return source.passedThroughNonGreedyDecision || (ok && ds.getNonGreedy())
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn_config_set.go b/vendor/github.com/antlr4-go/antlr/v4/atn_config_set.go
new file mode 100644
index 000000000..52dbaf806
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/atn_config_set.go
@@ -0,0 +1,301 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+)
+
+// ATNConfigSet is a specialized set of ATNConfig that tracks information
+// about its elements and can combine similar configurations using a
+// graph-structured stack.
+type ATNConfigSet struct {
+ cachedHash int
+
+ // configLookup is used to determine whether two ATNConfigSets are equal. We
+ // need all configurations with the same (s, i, _, semctx) to be equal. A key
+ // effectively doubles the number of objects associated with ATNConfigs. All
+ // keys are hashed by (s, i, _, pi), not including the context. Wiped out when
+ // read-only because a set becomes a DFA state.
+ configLookup *JStore[*ATNConfig, Comparator[*ATNConfig]]
+	// configs holds the added elements that did not match an existing key in configLookup
+	configs []*ATNConfig
+ configs []*ATNConfig
+
+ // TODO: These fields make me pretty uncomfortable, but it is nice to pack up
+ // info together because it saves re-computation. Can we track conflicts as they
+ // are added to save scanning configs later?
+ conflictingAlts *BitSet
+
+ // dipsIntoOuterContext is used by parsers and lexers. In a lexer, it indicates
+ // we hit a pred while computing a closure operation. Do not make a DFA state
+ // from the ATNConfigSet in this case. TODO: How is this used by parsers?
+ dipsIntoOuterContext bool
+
+ // fullCtx is whether it is part of a full context LL prediction. Used to
+ // determine how to merge $. It is a wildcard with SLL, but not for an LL
+ // context merge.
+ fullCtx bool
+
+ // Used in parser and lexer. In lexer, it indicates we hit a pred
+ // while computing a closure operation. Don't make a DFA state from this set.
+ hasSemanticContext bool
+
+ // readOnly is whether it is read-only. Do not
+ // allow any code to manipulate the set if true because DFA states will point at
+	// sets and those must not change. If not, protect other fields; conflictingAlts
+ // in particular, which is assigned after readOnly.
+ readOnly bool
+
+ // TODO: These fields make me pretty uncomfortable, but it is nice to pack up
+ // info together because it saves re-computation. Can we track conflicts as they
+ // are added to save scanning configs later?
+ uniqueAlt int
+}
+
+// Alts returns the combined set of alts for all the configurations in this set.
+func (b *ATNConfigSet) Alts() *BitSet {
+ alts := NewBitSet()
+ for _, it := range b.configs {
+ alts.add(it.GetAlt())
+ }
+ return alts
+}
+
+// NewATNConfigSet creates a new ATNConfigSet instance.
+func NewATNConfigSet(fullCtx bool) *ATNConfigSet {
+ return &ATNConfigSet{
+ cachedHash: -1,
+ configLookup: NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfCompInst, ATNConfigLookupCollection, "NewATNConfigSet()"),
+ fullCtx: fullCtx,
+ }
+}
+
+// Add merges contexts with existing configs for (s, i, pi, _),
+// where 's' is the ATNConfig.state, 'i' is the ATNConfig.alt, and
+// 'pi' is the [ATNConfig].semanticContext.
+//
+// We use (s,i,pi) as the key.
+// Updates dipsIntoOuterContext and hasSemanticContext when necessary.
+func (b *ATNConfigSet) Add(config *ATNConfig, mergeCache *JPCMap) bool {
+ if b.readOnly {
+ panic("set is read-only")
+ }
+
+ if config.GetSemanticContext() != SemanticContextNone {
+ b.hasSemanticContext = true
+ }
+
+ if config.GetReachesIntoOuterContext() > 0 {
+ b.dipsIntoOuterContext = true
+ }
+
+ existing, present := b.configLookup.Put(config)
+
+ // The config was not already in the set
+ //
+ if !present {
+ b.cachedHash = -1
+ b.configs = append(b.configs, config) // Track order here
+ return true
+ }
+
+ // Merge a previous (s, i, pi, _) with it and save the result
+ rootIsWildcard := !b.fullCtx
+ merged := merge(existing.GetContext(), config.GetContext(), rootIsWildcard, mergeCache)
+
+ // No need to check for existing.context because config.context is in the cache,
+ // since the only way to create new graphs is the "call rule" and here. We cache
+ // at both places.
+ existing.SetReachesIntoOuterContext(intMax(existing.GetReachesIntoOuterContext(), config.GetReachesIntoOuterContext()))
+
+ // Preserve the precedence filter suppression during the merge
+ if config.getPrecedenceFilterSuppressed() {
+ existing.setPrecedenceFilterSuppressed(true)
+ }
+
+ // Replace the context because there is no need to do alt mapping
+ existing.SetContext(merged)
+
+ return true
+}
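+
+// A usage sketch (editorial addition; s, ctxA, and ctxB are hypothetical):
+// adding two configs that share (state, alt, semantic context) merges their
+// prediction contexts instead of growing the set:
+//
+//	set := NewATNConfigSet(false)           // SLL, not full-context
+//	set.Add(NewATNConfig6(s, 1, ctxA), nil) // a nil merge cache is accepted
+//	set.Add(NewATNConfig6(s, 1, ctxB), nil) // merged into the existing entry
+//	// len(set.configs) is still 1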
+
+// GetStates returns the set of states represented by all configurations in this config set
+func (b *ATNConfigSet) GetStates() *JStore[ATNState, Comparator[ATNState]] {
+
+ // states uses the standard comparator and Hash() provided by the ATNState instance
+ //
+ states := NewJStore[ATNState, Comparator[ATNState]](aStateEqInst, ATNStateCollection, "ATNConfigSet.GetStates()")
+
+ for i := 0; i < len(b.configs); i++ {
+ states.Put(b.configs[i].GetState())
+ }
+
+ return states
+}
+
+func (b *ATNConfigSet) GetPredicates() []SemanticContext {
+ predicates := make([]SemanticContext, 0)
+
+ for i := 0; i < len(b.configs); i++ {
+ c := b.configs[i].GetSemanticContext()
+
+ if c != SemanticContextNone {
+ predicates = append(predicates, c)
+ }
+ }
+
+ return predicates
+}
+
+func (b *ATNConfigSet) OptimizeConfigs(interpreter *BaseATNSimulator) {
+ if b.readOnly {
+ panic("set is read-only")
+ }
+
+	// Empty indicates that no optimization is possible
+ if b.configLookup == nil || b.configLookup.Len() == 0 {
+ return
+ }
+
+ for i := 0; i < len(b.configs); i++ {
+ config := b.configs[i]
+ config.SetContext(interpreter.getCachedContext(config.GetContext()))
+ }
+}
+
+// AddAll adds every configuration in coll to this set via Add. The returned
+// value is always false and is not meaningful to callers.
+func (b *ATNConfigSet) AddAll(coll []*ATNConfig) bool {
+ for i := 0; i < len(coll); i++ {
+ b.Add(coll[i], nil)
+ }
+
+ return false
+}
+
+// Compare reports whether bs contains the same configurations as this set, in
+// the same order, as judged by each configuration's Equals function. Java uses
+// ArrayList.equals(), which requires the same order.
+func (b *ATNConfigSet) Compare(bs *ATNConfigSet) bool {
+ if len(b.configs) != len(bs.configs) {
+ return false
+ }
+ for i := 0; i < len(b.configs); i++ {
+ if !b.configs[i].Equals(bs.configs[i]) {
+ return false
+ }
+ }
+
+ return true
+}
+
+func (b *ATNConfigSet) Equals(other Collectable[ATNConfig]) bool {
+ if b == other {
+ return true
+ } else if _, ok := other.(*ATNConfigSet); !ok {
+ return false
+ }
+
+ other2 := other.(*ATNConfigSet)
+ var eca bool
+ switch {
+ case b.conflictingAlts == nil && other2.conflictingAlts == nil:
+ eca = true
+ case b.conflictingAlts != nil && other2.conflictingAlts != nil:
+ eca = b.conflictingAlts.equals(other2.conflictingAlts)
+ }
+ return b.configs != nil &&
+ b.fullCtx == other2.fullCtx &&
+ b.uniqueAlt == other2.uniqueAlt &&
+ eca &&
+ b.hasSemanticContext == other2.hasSemanticContext &&
+ b.dipsIntoOuterContext == other2.dipsIntoOuterContext &&
+ b.Compare(other2)
+}
+
+func (b *ATNConfigSet) Hash() int {
+ if b.readOnly {
+ if b.cachedHash == -1 {
+ b.cachedHash = b.hashCodeConfigs()
+ }
+
+ return b.cachedHash
+ }
+
+ return b.hashCodeConfigs()
+}
+
+func (b *ATNConfigSet) hashCodeConfigs() int {
+ h := 1
+ for _, config := range b.configs {
+ h = 31*h + config.Hash()
+ }
+ return h
+}
+
+func (b *ATNConfigSet) Contains(item *ATNConfig) bool {
+ if b.readOnly {
+ panic("not implemented for read-only sets")
+ }
+ if b.configLookup == nil {
+ return false
+ }
+ return b.configLookup.Contains(item)
+}
+
+func (b *ATNConfigSet) ContainsFast(item *ATNConfig) bool {
+ return b.Contains(item)
+}
+
+func (b *ATNConfigSet) Clear() {
+ if b.readOnly {
+ panic("set is read-only")
+ }
+ b.configs = make([]*ATNConfig, 0)
+ b.cachedHash = -1
+ b.configLookup = NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfCompInst, ATNConfigLookupCollection, "NewATNConfigSet()")
+}
+
+func (b *ATNConfigSet) String() string {
+
+ s := "["
+
+ for i, c := range b.configs {
+ s += c.String()
+
+ if i != len(b.configs)-1 {
+ s += ", "
+ }
+ }
+
+ s += "]"
+
+ if b.hasSemanticContext {
+ s += ",hasSemanticContext=" + fmt.Sprint(b.hasSemanticContext)
+ }
+
+ if b.uniqueAlt != ATNInvalidAltNumber {
+ s += ",uniqueAlt=" + fmt.Sprint(b.uniqueAlt)
+ }
+
+ if b.conflictingAlts != nil {
+ s += ",conflictingAlts=" + b.conflictingAlts.String()
+ }
+
+ if b.dipsIntoOuterContext {
+ s += ",dipsIntoOuterContext"
+ }
+
+ return s
+}
+
+// NewOrderedATNConfigSet creates a config set with a slightly different Hash/Equal pair
+// for use in lexers.
+func NewOrderedATNConfigSet() *ATNConfigSet {
+ return &ATNConfigSet{
+ cachedHash: -1,
+ // This set uses the standard Hash() and Equals() from ATNConfig
+ configLookup: NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ATNConfigCollection, "ATNConfigSet.NewOrderedATNConfigSet()"),
+ fullCtx: false,
+ }
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn_deserialization_options.go b/vendor/github.com/antlr4-go/antlr/v4/atn_deserialization_options.go
new file mode 100644
index 000000000..bdb30b362
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/atn_deserialization_options.go
@@ -0,0 +1,62 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import "errors"
+
+var defaultATNDeserializationOptions = ATNDeserializationOptions{true, true, false}
+
+type ATNDeserializationOptions struct {
+ readOnly bool
+ verifyATN bool
+ generateRuleBypassTransitions bool
+}
+
+func (opts *ATNDeserializationOptions) ReadOnly() bool {
+ return opts.readOnly
+}
+
+func (opts *ATNDeserializationOptions) SetReadOnly(readOnly bool) {
+ if opts.readOnly {
+ panic(errors.New("cannot mutate read only ATNDeserializationOptions"))
+ }
+ opts.readOnly = readOnly
+}
+
+func (opts *ATNDeserializationOptions) VerifyATN() bool {
+ return opts.verifyATN
+}
+
+func (opts *ATNDeserializationOptions) SetVerifyATN(verifyATN bool) {
+ if opts.readOnly {
+ panic(errors.New("cannot mutate read only ATNDeserializationOptions"))
+ }
+ opts.verifyATN = verifyATN
+}
+
+func (opts *ATNDeserializationOptions) GenerateRuleBypassTransitions() bool {
+ return opts.generateRuleBypassTransitions
+}
+
+func (opts *ATNDeserializationOptions) SetGenerateRuleBypassTransitions(generateRuleBypassTransitions bool) {
+ if opts.readOnly {
+ panic(errors.New("cannot mutate read only ATNDeserializationOptions"))
+ }
+ opts.generateRuleBypassTransitions = generateRuleBypassTransitions
+}
+
+//goland:noinspection GoUnusedExportedFunction
+func DefaultATNDeserializationOptions() *ATNDeserializationOptions {
+ return NewATNDeserializationOptions(&defaultATNDeserializationOptions)
+}
+
+func NewATNDeserializationOptions(other *ATNDeserializationOptions) *ATNDeserializationOptions {
+ o := new(ATNDeserializationOptions)
+ if other != nil {
+ *o = *other
+ o.readOnly = false
+ }
+ return o
+}
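+
+// A typical construction sketch (editorial addition): copy the defaults,
+// enable rule bypass transitions, and hand the options to a deserializer:
+//
+//	opts := DefaultATNDeserializationOptions()
+//	opts.SetGenerateRuleBypassTransitions(true)
+//	d := NewATNDeserializer(opts)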
diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn_deserializer.go b/vendor/github.com/antlr4-go/antlr/v4/atn_deserializer.go
new file mode 100644
index 000000000..2dcb9ae11
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/atn_deserializer.go
@@ -0,0 +1,684 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strconv"
+)
+
+const serializedVersion = 4
+
+type loopEndStateIntPair struct {
+ item0 *LoopEndState
+ item1 int
+}
+
+type blockStartStateIntPair struct {
+ item0 BlockStartState
+ item1 int
+}
+
+type ATNDeserializer struct {
+ options *ATNDeserializationOptions
+ data []int32
+ pos int
+}
+
+func NewATNDeserializer(options *ATNDeserializationOptions) *ATNDeserializer {
+ if options == nil {
+ options = &defaultATNDeserializationOptions
+ }
+
+ return &ATNDeserializer{options: options}
+}
+
+//goland:noinspection GoUnusedFunction
+func stringInSlice(a string, list []string) int {
+ for i, b := range list {
+ if b == a {
+ return i
+ }
+ }
+
+ return -1
+}
+
+func (a *ATNDeserializer) Deserialize(data []int32) *ATN {
+ a.data = data
+ a.pos = 0
+ a.checkVersion()
+
+ atn := a.readATN()
+
+ a.readStates(atn)
+ a.readRules(atn)
+ a.readModes(atn)
+
+ sets := a.readSets(atn, nil)
+
+ a.readEdges(atn, sets)
+ a.readDecisions(atn)
+ a.readLexerActions(atn)
+ a.markPrecedenceDecisions(atn)
+ a.verifyATN(atn)
+
+ if a.options.GenerateRuleBypassTransitions() && atn.grammarType == ATNTypeParser {
+ a.generateRuleBypassTransitions(atn)
+ // Re-verify after modification
+ a.verifyATN(atn)
+ }
+
+	return atn
+}
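+
+// Generated parsers and lexers embed their serialized ATN as a []int32 value.
+// A deserialization sketch (editorial addition; serializedATN stands for that
+// generated variable):
+//
+//	atn := NewATNDeserializer(nil).Deserialize(serializedATN)
+//	_ = atn // now holds the states, rules, modes, and decisions read from the data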
+
+func (a *ATNDeserializer) checkVersion() {
+ version := a.readInt()
+
+ if version != serializedVersion {
+ panic("Could not deserialize ATN with version " + strconv.Itoa(version) + " (expected " + strconv.Itoa(serializedVersion) + ").")
+ }
+}
+
+func (a *ATNDeserializer) readATN() *ATN {
+ grammarType := a.readInt()
+ maxTokenType := a.readInt()
+
+ return NewATN(grammarType, maxTokenType)
+}
+
+func (a *ATNDeserializer) readStates(atn *ATN) {
+ nstates := a.readInt()
+
+ // Allocate worst case size.
+ loopBackStateNumbers := make([]loopEndStateIntPair, 0, nstates)
+ endStateNumbers := make([]blockStartStateIntPair, 0, nstates)
+
+ // Preallocate states slice.
+ atn.states = make([]ATNState, 0, nstates)
+
+ for i := 0; i < nstates; i++ {
+ stype := a.readInt()
+
+ // Ignore bad types of states
+ if stype == ATNStateInvalidType {
+ atn.addState(nil)
+ continue
+ }
+
+ ruleIndex := a.readInt()
+
+ s := a.stateFactory(stype, ruleIndex)
+
+ if stype == ATNStateLoopEnd {
+ loopBackStateNumber := a.readInt()
+
+ loopBackStateNumbers = append(loopBackStateNumbers, loopEndStateIntPair{s.(*LoopEndState), loopBackStateNumber})
+ } else if s2, ok := s.(BlockStartState); ok {
+ endStateNumber := a.readInt()
+
+ endStateNumbers = append(endStateNumbers, blockStartStateIntPair{s2, endStateNumber})
+ }
+
+ atn.addState(s)
+ }
+
+ // Delay the assignment of loop back and end states until we know all the state
+ // instances have been initialized
+ for _, pair := range loopBackStateNumbers {
+ pair.item0.loopBackState = atn.states[pair.item1]
+ }
+
+ for _, pair := range endStateNumbers {
+ pair.item0.setEndState(atn.states[pair.item1].(*BlockEndState))
+ }
+
+ numNonGreedyStates := a.readInt()
+ for j := 0; j < numNonGreedyStates; j++ {
+ stateNumber := a.readInt()
+
+ atn.states[stateNumber].(DecisionState).setNonGreedy(true)
+ }
+
+ numPrecedenceStates := a.readInt()
+ for j := 0; j < numPrecedenceStates; j++ {
+ stateNumber := a.readInt()
+
+ atn.states[stateNumber].(*RuleStartState).isPrecedenceRule = true
+ }
+}
+
+func (a *ATNDeserializer) readRules(atn *ATN) {
+ nrules := a.readInt()
+
+ if atn.grammarType == ATNTypeLexer {
+ atn.ruleToTokenType = make([]int, nrules)
+ }
+
+ atn.ruleToStartState = make([]*RuleStartState, nrules)
+
+ for i := range atn.ruleToStartState {
+ s := a.readInt()
+ startState := atn.states[s].(*RuleStartState)
+
+ atn.ruleToStartState[i] = startState
+
+ if atn.grammarType == ATNTypeLexer {
+ tokenType := a.readInt()
+
+ atn.ruleToTokenType[i] = tokenType
+ }
+ }
+
+ atn.ruleToStopState = make([]*RuleStopState, nrules)
+
+ for _, state := range atn.states {
+ if s2, ok := state.(*RuleStopState); ok {
+ atn.ruleToStopState[s2.ruleIndex] = s2
+ atn.ruleToStartState[s2.ruleIndex].stopState = s2
+ }
+ }
+}
+
+func (a *ATNDeserializer) readModes(atn *ATN) {
+ nmodes := a.readInt()
+ atn.modeToStartState = make([]*TokensStartState, nmodes)
+
+ for i := range atn.modeToStartState {
+ s := a.readInt()
+
+ atn.modeToStartState[i] = atn.states[s].(*TokensStartState)
+ }
+}
+
+func (a *ATNDeserializer) readSets(_ *ATN, sets []*IntervalSet) []*IntervalSet {
+ m := a.readInt()
+
+ // Preallocate the needed capacity.
+ if cap(sets)-len(sets) < m {
+ isets := make([]*IntervalSet, len(sets), len(sets)+m)
+ copy(isets, sets)
+ sets = isets
+ }
+
+ for i := 0; i < m; i++ {
+ iset := NewIntervalSet()
+
+ sets = append(sets, iset)
+
+ n := a.readInt()
+ containsEOF := a.readInt()
+
+ if containsEOF != 0 {
+ iset.addOne(-1)
+ }
+
+ for j := 0; j < n; j++ {
+ i1 := a.readInt()
+ i2 := a.readInt()
+
+ iset.addRange(i1, i2)
+ }
+ }
+
+ return sets
+}
+
+func (a *ATNDeserializer) readEdges(atn *ATN, sets []*IntervalSet) {
+ nedges := a.readInt()
+
+ for i := 0; i < nedges; i++ {
+ var (
+ src = a.readInt()
+ trg = a.readInt()
+ ttype = a.readInt()
+ arg1 = a.readInt()
+ arg2 = a.readInt()
+ arg3 = a.readInt()
+ trans = a.edgeFactory(atn, ttype, src, trg, arg1, arg2, arg3, sets)
+ srcState = atn.states[src]
+ )
+
+ srcState.AddTransition(trans, -1)
+ }
+
+ // Edges for rule stop states can be derived, so they are not serialized
+ for _, state := range atn.states {
+ for _, t := range state.GetTransitions() {
+ var rt, ok = t.(*RuleTransition)
+
+ if !ok {
+ continue
+ }
+
+ outermostPrecedenceReturn := -1
+
+ if atn.ruleToStartState[rt.getTarget().GetRuleIndex()].isPrecedenceRule {
+ if rt.precedence == 0 {
+ outermostPrecedenceReturn = rt.getTarget().GetRuleIndex()
+ }
+ }
+
+ trans := NewEpsilonTransition(rt.followState, outermostPrecedenceReturn)
+
+ atn.ruleToStopState[rt.getTarget().GetRuleIndex()].AddTransition(trans, -1)
+ }
+ }
+
+ for _, state := range atn.states {
+ if s2, ok := state.(BlockStartState); ok {
+ // We need to know the end state to set its start state
+ if s2.getEndState() == nil {
+ panic("IllegalState")
+ }
+
+ // Block end states can only be associated to a single block start state
+ if s2.getEndState().startState != nil {
+ panic("IllegalState")
+ }
+
+ s2.getEndState().startState = state
+ }
+
+ if s2, ok := state.(*PlusLoopbackState); ok {
+ for _, t := range s2.GetTransitions() {
+ if t2, ok := t.getTarget().(*PlusBlockStartState); ok {
+ t2.loopBackState = state
+ }
+ }
+ } else if s2, ok := state.(*StarLoopbackState); ok {
+ for _, t := range s2.GetTransitions() {
+ if t2, ok := t.getTarget().(*StarLoopEntryState); ok {
+ t2.loopBackState = state
+ }
+ }
+ }
+ }
+}
+
+func (a *ATNDeserializer) readDecisions(atn *ATN) {
+ ndecisions := a.readInt()
+
+ for i := 0; i < ndecisions; i++ {
+ s := a.readInt()
+ decState := atn.states[s].(DecisionState)
+
+ atn.DecisionToState = append(atn.DecisionToState, decState)
+ decState.setDecision(i)
+ }
+}
+
+func (a *ATNDeserializer) readLexerActions(atn *ATN) {
+ if atn.grammarType == ATNTypeLexer {
+ count := a.readInt()
+
+ atn.lexerActions = make([]LexerAction, count)
+
+ for i := range atn.lexerActions {
+ actionType := a.readInt()
+ data1 := a.readInt()
+ data2 := a.readInt()
+ atn.lexerActions[i] = a.lexerActionFactory(actionType, data1, data2)
+ }
+ }
+}
+
+func (a *ATNDeserializer) generateRuleBypassTransitions(atn *ATN) {
+ count := len(atn.ruleToStartState)
+
+ for i := 0; i < count; i++ {
+ atn.ruleToTokenType[i] = atn.maxTokenType + i + 1
+ }
+
+ for i := 0; i < count; i++ {
+ a.generateRuleBypassTransition(atn, i)
+ }
+}
+
+func (a *ATNDeserializer) generateRuleBypassTransition(atn *ATN, idx int) {
+ bypassStart := NewBasicBlockStartState()
+
+ bypassStart.ruleIndex = idx
+ atn.addState(bypassStart)
+
+ bypassStop := NewBlockEndState()
+
+ bypassStop.ruleIndex = idx
+ atn.addState(bypassStop)
+
+ bypassStart.endState = bypassStop
+
+ atn.defineDecisionState(&bypassStart.BaseDecisionState)
+
+ bypassStop.startState = bypassStart
+
+ var excludeTransition Transition
+ var endState ATNState
+
+ if atn.ruleToStartState[idx].isPrecedenceRule {
+ // Wrap from the beginning of the rule to the StarLoopEntryState
+ endState = nil
+
+ for i := 0; i < len(atn.states); i++ {
+ state := atn.states[i]
+
+ if a.stateIsEndStateFor(state, idx) != nil {
+ endState = state
+ excludeTransition = state.(*StarLoopEntryState).loopBackState.GetTransitions()[0]
+
+ break
+ }
+ }
+
+ if excludeTransition == nil {
+ panic("Couldn't identify final state of the precedence rule prefix section.")
+ }
+ } else {
+ endState = atn.ruleToStopState[idx]
+ }
+
+ // All non-excluded transitions that currently target end state need to target
+ // blockEnd instead
+ for i := 0; i < len(atn.states); i++ {
+ state := atn.states[i]
+
+ for j := 0; j < len(state.GetTransitions()); j++ {
+ transition := state.GetTransitions()[j]
+
+ if transition == excludeTransition {
+ continue
+ }
+
+ if transition.getTarget() == endState {
+ transition.setTarget(bypassStop)
+ }
+ }
+ }
+
+	// All transitions leaving the rule start state need to leave blockStart instead.
+	// Move them one at a time, last to first, so the rule start state ends up with
+	// no outgoing transitions of its own. (The loop as vendored neither removed a
+	// transition nor decremented count, which never terminated.)
+	ruleToStartState := atn.ruleToStartState[idx]
+	count := len(ruleToStartState.GetTransitions())
+
+	for count > 0 {
+		bypassStart.AddTransition(ruleToStartState.GetTransitions()[count-1], -1)
+		ruleToStartState.SetTransitions(ruleToStartState.GetTransitions()[:count-1])
+		count--
+	}
+
+ // Link the new states
+ atn.ruleToStartState[idx].AddTransition(NewEpsilonTransition(bypassStart, -1), -1)
+ bypassStop.AddTransition(NewEpsilonTransition(endState, -1), -1)
+
+ MatchState := NewBasicState()
+
+ atn.addState(MatchState)
+ MatchState.AddTransition(NewAtomTransition(bypassStop, atn.ruleToTokenType[idx]), -1)
+ bypassStart.AddTransition(NewEpsilonTransition(MatchState, -1), -1)
+}
+
+func (a *ATNDeserializer) stateIsEndStateFor(state ATNState, idx int) ATNState {
+ if state.GetRuleIndex() != idx {
+ return nil
+ }
+
+ if _, ok := state.(*StarLoopEntryState); !ok {
+ return nil
+ }
+
+ maybeLoopEndState := state.GetTransitions()[len(state.GetTransitions())-1].getTarget()
+
+ if _, ok := maybeLoopEndState.(*LoopEndState); !ok {
+ return nil
+ }
+
+ var _, ok = maybeLoopEndState.GetTransitions()[0].getTarget().(*RuleStopState)
+
+ if maybeLoopEndState.(*LoopEndState).epsilonOnlyTransitions && ok {
+ return state
+ }
+
+ return nil
+}
+
+// markPrecedenceDecisions analyzes the StarLoopEntryState states in the
+// specified ATN to set the StarLoopEntryState.precedenceRuleDecision field to
+// the correct value.
+func (a *ATNDeserializer) markPrecedenceDecisions(atn *ATN) {
+ for _, state := range atn.states {
+ if _, ok := state.(*StarLoopEntryState); !ok {
+ continue
+ }
+
+ // We analyze the [ATN] to determine if an ATN decision state is the
+ // decision for the closure block that determines whether a
+ // precedence rule should continue or complete.
+ if atn.ruleToStartState[state.GetRuleIndex()].isPrecedenceRule {
+ maybeLoopEndState := state.GetTransitions()[len(state.GetTransitions())-1].getTarget()
+
+ if s3, ok := maybeLoopEndState.(*LoopEndState); ok {
+ var _, ok2 = maybeLoopEndState.GetTransitions()[0].getTarget().(*RuleStopState)
+
+ if s3.epsilonOnlyTransitions && ok2 {
+ state.(*StarLoopEntryState).precedenceRuleDecision = true
+ }
+ }
+ }
+ }
+}
+
+func (a *ATNDeserializer) verifyATN(atn *ATN) {
+ if !a.options.VerifyATN() {
+ return
+ }
+
+ // Verify assumptions
+ for _, state := range atn.states {
+ if state == nil {
+ continue
+ }
+
+ a.checkCondition(state.GetEpsilonOnlyTransitions() || len(state.GetTransitions()) <= 1, "")
+
+ switch s2 := state.(type) {
+ case *PlusBlockStartState:
+ a.checkCondition(s2.loopBackState != nil, "")
+
+ case *StarLoopEntryState:
+ a.checkCondition(s2.loopBackState != nil, "")
+ a.checkCondition(len(s2.GetTransitions()) == 2, "")
+
+ switch s2.transitions[0].getTarget().(type) {
+ case *StarBlockStartState:
+ _, ok := s2.transitions[1].getTarget().(*LoopEndState)
+
+ a.checkCondition(ok, "")
+ a.checkCondition(!s2.nonGreedy, "")
+
+ case *LoopEndState:
+ var _, ok = s2.transitions[1].getTarget().(*StarBlockStartState)
+
+ a.checkCondition(ok, "")
+ a.checkCondition(s2.nonGreedy, "")
+
+ default:
+ panic("IllegalState")
+ }
+
+ case *StarLoopbackState:
+ a.checkCondition(len(state.GetTransitions()) == 1, "")
+
+ var _, ok = state.GetTransitions()[0].getTarget().(*StarLoopEntryState)
+
+ a.checkCondition(ok, "")
+
+ case *LoopEndState:
+ a.checkCondition(s2.loopBackState != nil, "")
+
+ case *RuleStartState:
+ a.checkCondition(s2.stopState != nil, "")
+
+ case BlockStartState:
+ a.checkCondition(s2.getEndState() != nil, "")
+
+ case *BlockEndState:
+ a.checkCondition(s2.startState != nil, "")
+
+ case DecisionState:
+ a.checkCondition(len(s2.GetTransitions()) <= 1 || s2.getDecision() >= 0, "")
+
+ default:
+ var _, ok = s2.(*RuleStopState)
+
+ a.checkCondition(len(s2.GetTransitions()) <= 1 || ok, "")
+ }
+ }
+}
+
+func (a *ATNDeserializer) checkCondition(condition bool, message string) {
+ if !condition {
+ if message == "" {
+ message = "IllegalState"
+ }
+
+ panic(message)
+ }
+}
+
+func (a *ATNDeserializer) readInt() int {
+ v := a.data[a.pos]
+
+ a.pos++
+
+ return int(v) // data is 32 bits but int is at least that big
+}
+
+func (a *ATNDeserializer) edgeFactory(atn *ATN, typeIndex, _, trg, arg1, arg2, arg3 int, sets []*IntervalSet) Transition {
+ target := atn.states[trg]
+
+ switch typeIndex {
+ case TransitionEPSILON:
+ return NewEpsilonTransition(target, -1)
+
+ case TransitionRANGE:
+ if arg3 != 0 {
+ return NewRangeTransition(target, TokenEOF, arg2)
+ }
+
+ return NewRangeTransition(target, arg1, arg2)
+
+ case TransitionRULE:
+ return NewRuleTransition(atn.states[arg1], arg2, arg3, target)
+
+ case TransitionPREDICATE:
+ return NewPredicateTransition(target, arg1, arg2, arg3 != 0)
+
+ case TransitionPRECEDENCE:
+ return NewPrecedencePredicateTransition(target, arg1)
+
+ case TransitionATOM:
+ if arg3 != 0 {
+ return NewAtomTransition(target, TokenEOF)
+ }
+
+ return NewAtomTransition(target, arg1)
+
+ case TransitionACTION:
+ return NewActionTransition(target, arg1, arg2, arg3 != 0)
+
+ case TransitionSET:
+ return NewSetTransition(target, sets[arg1])
+
+ case TransitionNOTSET:
+ return NewNotSetTransition(target, sets[arg1])
+
+ case TransitionWILDCARD:
+ return NewWildcardTransition(target)
+ }
+
+ panic("The specified transition type is not valid.")
+}
+
+func (a *ATNDeserializer) stateFactory(typeIndex, ruleIndex int) ATNState {
+ var s ATNState
+
+ switch typeIndex {
+ case ATNStateInvalidType:
+ return nil
+
+ case ATNStateBasic:
+ s = NewBasicState()
+
+ case ATNStateRuleStart:
+ s = NewRuleStartState()
+
+ case ATNStateBlockStart:
+ s = NewBasicBlockStartState()
+
+ case ATNStatePlusBlockStart:
+ s = NewPlusBlockStartState()
+
+ case ATNStateStarBlockStart:
+ s = NewStarBlockStartState()
+
+ case ATNStateTokenStart:
+ s = NewTokensStartState()
+
+ case ATNStateRuleStop:
+ s = NewRuleStopState()
+
+ case ATNStateBlockEnd:
+ s = NewBlockEndState()
+
+ case ATNStateStarLoopBack:
+ s = NewStarLoopbackState()
+
+ case ATNStateStarLoopEntry:
+ s = NewStarLoopEntryState()
+
+ case ATNStatePlusLoopBack:
+ s = NewPlusLoopbackState()
+
+ case ATNStateLoopEnd:
+ s = NewLoopEndState()
+
+ default:
+ panic(fmt.Sprintf("state type %d is invalid", typeIndex))
+ }
+
+ s.SetRuleIndex(ruleIndex)
+
+ return s
+}
+
+func (a *ATNDeserializer) lexerActionFactory(typeIndex, data1, data2 int) LexerAction {
+ switch typeIndex {
+ case LexerActionTypeChannel:
+ return NewLexerChannelAction(data1)
+
+ case LexerActionTypeCustom:
+ return NewLexerCustomAction(data1, data2)
+
+ case LexerActionTypeMode:
+ return NewLexerModeAction(data1)
+
+ case LexerActionTypeMore:
+ return LexerMoreActionINSTANCE
+
+ case LexerActionTypePopMode:
+ return LexerPopModeActionINSTANCE
+
+ case LexerActionTypePushMode:
+ return NewLexerPushModeAction(data1)
+
+ case LexerActionTypeSkip:
+ return LexerSkipActionINSTANCE
+
+ case LexerActionTypeType:
+ return NewLexerTypeAction(data1)
+
+ default:
+ panic(fmt.Sprintf("lexer action %d is invalid", typeIndex))
+ }
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn_simulator.go b/vendor/github.com/antlr4-go/antlr/v4/atn_simulator.go
new file mode 100644
index 000000000..afe6c9f80
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/atn_simulator.go
@@ -0,0 +1,41 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+var ATNSimulatorError = NewDFAState(0x7FFFFFFF, NewATNConfigSet(false))
+
+type IATNSimulator interface {
+ SharedContextCache() *PredictionContextCache
+ ATN() *ATN
+ DecisionToDFA() []*DFA
+}
+
+type BaseATNSimulator struct {
+ atn *ATN
+ sharedContextCache *PredictionContextCache
+ decisionToDFA []*DFA
+}
+
+func (b *BaseATNSimulator) getCachedContext(context *PredictionContext) *PredictionContext {
+ if b.sharedContextCache == nil {
+ return context
+ }
+
+ //visited := NewJMap[*PredictionContext, *PredictionContext, Comparator[*PredictionContext]](pContextEqInst, PredictionVisitedCollection, "Visit map in getCachedContext()")
+ visited := NewVisitRecord()
+ return getCachedBasePredictionContext(context, b.sharedContextCache, visited)
+}
+
+func (b *BaseATNSimulator) SharedContextCache() *PredictionContextCache {
+ return b.sharedContextCache
+}
+
+func (b *BaseATNSimulator) ATN() *ATN {
+ return b.atn
+}
+
+func (b *BaseATNSimulator) DecisionToDFA() []*DFA {
+ return b.decisionToDFA
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn_state.go b/vendor/github.com/antlr4-go/antlr/v4/atn_state.go
new file mode 100644
index 000000000..2ae5807cd
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/atn_state.go
@@ -0,0 +1,461 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "os"
+ "strconv"
+)
+
+// Constants for serialization.
+const (
+ ATNStateInvalidType = 0
+ ATNStateBasic = 1
+ ATNStateRuleStart = 2
+ ATNStateBlockStart = 3
+ ATNStatePlusBlockStart = 4
+ ATNStateStarBlockStart = 5
+ ATNStateTokenStart = 6
+ ATNStateRuleStop = 7
+ ATNStateBlockEnd = 8
+ ATNStateStarLoopBack = 9
+ ATNStateStarLoopEntry = 10
+ ATNStatePlusLoopBack = 11
+ ATNStateLoopEnd = 12
+
+ ATNStateInvalidStateNumber = -1
+)
+
+//goland:noinspection GoUnusedGlobalVariable
+var ATNStateInitialNumTransitions = 4
+
+type ATNState interface {
+ GetEpsilonOnlyTransitions() bool
+
+ GetRuleIndex() int
+ SetRuleIndex(int)
+
+ GetNextTokenWithinRule() *IntervalSet
+ SetNextTokenWithinRule(*IntervalSet)
+
+ GetATN() *ATN
+ SetATN(*ATN)
+
+ GetStateType() int
+
+ GetStateNumber() int
+ SetStateNumber(int)
+
+ GetTransitions() []Transition
+ SetTransitions([]Transition)
+ AddTransition(Transition, int)
+
+ String() string
+ Hash() int
+ Equals(Collectable[ATNState]) bool
+}
+
+type BaseATNState struct {
+ // NextTokenWithinRule caches lookahead during parsing. Not used during construction.
+ NextTokenWithinRule *IntervalSet
+
+ // atn is the current ATN.
+ atn *ATN
+
+ epsilonOnlyTransitions bool
+
+ // ruleIndex tracks the Rule index because there are no Rule objects at runtime.
+ ruleIndex int
+
+ stateNumber int
+
+ stateType int
+
+ // Track the transitions emanating from this ATN state.
+ transitions []Transition
+}
+
+func NewATNState() *BaseATNState {
+ return &BaseATNState{stateNumber: ATNStateInvalidStateNumber, stateType: ATNStateInvalidType}
+}
+
+func (as *BaseATNState) GetRuleIndex() int {
+ return as.ruleIndex
+}
+
+func (as *BaseATNState) SetRuleIndex(v int) {
+ as.ruleIndex = v
+}
+func (as *BaseATNState) GetEpsilonOnlyTransitions() bool {
+ return as.epsilonOnlyTransitions
+}
+
+func (as *BaseATNState) GetATN() *ATN {
+ return as.atn
+}
+
+func (as *BaseATNState) SetATN(atn *ATN) {
+ as.atn = atn
+}
+
+func (as *BaseATNState) GetTransitions() []Transition {
+ return as.transitions
+}
+
+func (as *BaseATNState) SetTransitions(t []Transition) {
+ as.transitions = t
+}
+
+func (as *BaseATNState) GetStateType() int {
+ return as.stateType
+}
+
+func (as *BaseATNState) GetStateNumber() int {
+ return as.stateNumber
+}
+
+func (as *BaseATNState) SetStateNumber(stateNumber int) {
+ as.stateNumber = stateNumber
+}
+
+func (as *BaseATNState) GetNextTokenWithinRule() *IntervalSet {
+ return as.NextTokenWithinRule
+}
+
+func (as *BaseATNState) SetNextTokenWithinRule(v *IntervalSet) {
+ as.NextTokenWithinRule = v
+}
+
+func (as *BaseATNState) Hash() int {
+ return as.stateNumber
+}
+
+func (as *BaseATNState) String() string {
+ return strconv.Itoa(as.stateNumber)
+}
+
+func (as *BaseATNState) Equals(other Collectable[ATNState]) bool {
+ if ot, ok := other.(ATNState); ok {
+ return as.stateNumber == ot.GetStateNumber()
+ }
+
+ return false
+}
+
+func (as *BaseATNState) isNonGreedyExitState() bool {
+ return false
+}
+
+func (as *BaseATNState) AddTransition(trans Transition, index int) {
+ if len(as.transitions) == 0 {
+ as.epsilonOnlyTransitions = trans.getIsEpsilon()
+ } else if as.epsilonOnlyTransitions != trans.getIsEpsilon() {
+		_, _ = fmt.Fprintf(os.Stderr, "ATN state %d has both epsilon and non-epsilon transitions.\n", as.stateNumber)
+ as.epsilonOnlyTransitions = false
+ }
+
+ // TODO: Check code for already present compared to the Java equivalent
+ //alreadyPresent := false
+ //for _, t := range as.transitions {
+ // if t.getTarget().GetStateNumber() == trans.getTarget().GetStateNumber() {
+ // if t.getLabel() != nil && trans.getLabel() != nil && trans.getLabel().Equals(t.getLabel()) {
+ // alreadyPresent = true
+ // break
+ // }
+ // } else if t.getIsEpsilon() && trans.getIsEpsilon() {
+ // alreadyPresent = true
+ // break
+ // }
+ //}
+ //if !alreadyPresent {
+ if index == -1 {
+ as.transitions = append(as.transitions, trans)
+ } else {
+ as.transitions = append(as.transitions[:index], append([]Transition{trans}, as.transitions[index:]...)...)
+ // TODO: as.transitions.splice(index, 1, trans)
+ }
+ //} else {
+ // _, _ = fmt.Fprintf(os.Stderr, "Transition already present in state %d\n", as.stateNumber)
+ //}
+}
+
+type BasicState struct {
+ BaseATNState
+}
+
+func NewBasicState() *BasicState {
+ return &BasicState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStateBasic,
+ },
+ }
+}
+
+type DecisionState interface {
+ ATNState
+
+ getDecision() int
+ setDecision(int)
+
+ getNonGreedy() bool
+ setNonGreedy(bool)
+}
+
+type BaseDecisionState struct {
+ BaseATNState
+ decision int
+ nonGreedy bool
+}
+
+func NewBaseDecisionState() *BaseDecisionState {
+ return &BaseDecisionState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStateBasic,
+ },
+ decision: -1,
+ }
+}
+
+func (s *BaseDecisionState) getDecision() int {
+ return s.decision
+}
+
+func (s *BaseDecisionState) setDecision(b int) {
+ s.decision = b
+}
+
+func (s *BaseDecisionState) getNonGreedy() bool {
+ return s.nonGreedy
+}
+
+func (s *BaseDecisionState) setNonGreedy(b bool) {
+ s.nonGreedy = b
+}
+
+type BlockStartState interface {
+ DecisionState
+
+ getEndState() *BlockEndState
+ setEndState(*BlockEndState)
+}
+
+// BaseBlockStartState is the start of a regular (...) block.
+type BaseBlockStartState struct {
+ BaseDecisionState
+ endState *BlockEndState
+}
+
+func NewBlockStartState() *BaseBlockStartState {
+ return &BaseBlockStartState{
+ BaseDecisionState: BaseDecisionState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStateBasic,
+ },
+ decision: -1,
+ },
+ }
+}
+
+func (s *BaseBlockStartState) getEndState() *BlockEndState {
+ return s.endState
+}
+
+func (s *BaseBlockStartState) setEndState(b *BlockEndState) {
+ s.endState = b
+}
+
+type BasicBlockStartState struct {
+ BaseBlockStartState
+}
+
+func NewBasicBlockStartState() *BasicBlockStartState {
+ return &BasicBlockStartState{
+ BaseBlockStartState: BaseBlockStartState{
+ BaseDecisionState: BaseDecisionState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStateBlockStart,
+ },
+ },
+ },
+ }
+}
+
+var _ BlockStartState = &BasicBlockStartState{}
+
+// BlockEndState is a terminal node of a simple (a|b|c) block.
+type BlockEndState struct {
+ BaseATNState
+ startState ATNState
+}
+
+func NewBlockEndState() *BlockEndState {
+ return &BlockEndState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStateBlockEnd,
+ },
+ startState: nil,
+ }
+}
+
+// RuleStopState is the last node in the ATN for a rule, unless that rule is the
+// start symbol. In that case, there is one transition to EOF. Later, we might
+// encode references to all calls to this rule to compute FOLLOW sets for error
+// handling.
+type RuleStopState struct {
+ BaseATNState
+}
+
+func NewRuleStopState() *RuleStopState {
+ return &RuleStopState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStateRuleStop,
+ },
+ }
+}
+
+type RuleStartState struct {
+ BaseATNState
+ stopState ATNState
+ isPrecedenceRule bool
+}
+
+func NewRuleStartState() *RuleStartState {
+ return &RuleStartState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStateRuleStart,
+ },
+ }
+}
+
+// PlusLoopbackState is a decision state for A+ and (A|B)+. It has two
+// transitions: one to the loop back to start of the block, and one to exit.
+type PlusLoopbackState struct {
+ BaseDecisionState
+}
+
+func NewPlusLoopbackState() *PlusLoopbackState {
+ return &PlusLoopbackState{
+ BaseDecisionState: BaseDecisionState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStatePlusLoopBack,
+ },
+ },
+ }
+}
+
+// PlusBlockStartState is the start of a (A|B|...)+ loop. Technically it is a
+// decision state; we don't use it for code generation. Somebody might need it,
+// so it is included for completeness. In reality, PlusLoopbackState is the real
+// decision-making node for A+.
+type PlusBlockStartState struct {
+ BaseBlockStartState
+ loopBackState ATNState
+}
+
+func NewPlusBlockStartState() *PlusBlockStartState {
+ return &PlusBlockStartState{
+ BaseBlockStartState: BaseBlockStartState{
+ BaseDecisionState: BaseDecisionState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStatePlusBlockStart,
+ },
+ },
+ },
+ }
+}
+
+var _ BlockStartState = &PlusBlockStartState{}
+
+// StarBlockStartState is the block that begins a closure loop.
+type StarBlockStartState struct {
+ BaseBlockStartState
+}
+
+func NewStarBlockStartState() *StarBlockStartState {
+ return &StarBlockStartState{
+ BaseBlockStartState: BaseBlockStartState{
+ BaseDecisionState: BaseDecisionState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStateStarBlockStart,
+ },
+ },
+ },
+ }
+}
+
+var _ BlockStartState = &StarBlockStartState{}
+
+type StarLoopbackState struct {
+ BaseATNState
+}
+
+func NewStarLoopbackState() *StarLoopbackState {
+ return &StarLoopbackState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStateStarLoopBack,
+ },
+ }
+}
+
+type StarLoopEntryState struct {
+ BaseDecisionState
+ loopBackState ATNState
+ precedenceRuleDecision bool
+}
+
+func NewStarLoopEntryState() *StarLoopEntryState {
+	// precedenceRuleDecision is false by default and indicates whether this state can
+	// benefit from a precedence DFA during SLL decision-making.
+ return &StarLoopEntryState{
+ BaseDecisionState: BaseDecisionState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStateStarLoopEntry,
+ },
+ },
+ }
+}
+
+// LoopEndState marks the end of a * or + loop.
+type LoopEndState struct {
+ BaseATNState
+ loopBackState ATNState
+}
+
+func NewLoopEndState() *LoopEndState {
+ return &LoopEndState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStateLoopEnd,
+ },
+ }
+}
+
+// TokensStartState is the Tokens rule start state linking to each lexer rule start state.
+type TokensStartState struct {
+ BaseDecisionState
+}
+
+func NewTokensStartState() *TokensStartState {
+ return &TokensStartState{
+ BaseDecisionState: BaseDecisionState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStateTokenStart,
+ },
+ },
+ }
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn_type.go b/vendor/github.com/antlr4-go/antlr/v4/atn_type.go
new file mode 100644
index 000000000..3a515a145
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/atn_type.go
@@ -0,0 +1,11 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+// Represent the type of recognizer an ATN applies to.
+const (
+ ATNTypeLexer = 0
+ ATNTypeParser = 1
+)
diff --git a/vendor/github.com/antlr4-go/antlr/v4/char_stream.go b/vendor/github.com/antlr4-go/antlr/v4/char_stream.go
new file mode 100644
index 000000000..bd8127b6b
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/char_stream.go
@@ -0,0 +1,12 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+type CharStream interface {
+ IntStream
+ GetText(int, int) string
+ GetTextFromTokens(start, end Token) string
+ GetTextFromInterval(Interval) string
+}
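+
+// For example (editorial sketch), with any CharStream implementation cs:
+//
+//	text := cs.GetTextFromInterval(NewInterval(0, 4)) // characters 0 through 4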
diff --git a/vendor/github.com/antlr4-go/antlr/v4/common_token_factory.go b/vendor/github.com/antlr4-go/antlr/v4/common_token_factory.go
new file mode 100644
index 000000000..1bb0314ea
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/common_token_factory.go
@@ -0,0 +1,56 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+// TokenFactory creates CommonToken objects.
+type TokenFactory interface {
+ Create(source *TokenSourceCharStreamPair, ttype int, text string, channel, start, stop, line, column int) Token
+}
+
+// CommonTokenFactory is the default TokenFactory implementation.
+type CommonTokenFactory struct {
+ // copyText indicates whether CommonToken.setText should be called after
+ // constructing tokens to explicitly set the text. This is useful for cases
+ // where the input stream might not be able to provide arbitrary substrings of
+ // text from the input after the lexer creates a token (e.g. the
+ // implementation of CharStream.GetText in UnbufferedCharStream panics an
+	// implementation of CharStream.GetText in UnbufferedCharStream panics with an
+ // Token.GetText to be called at any time regardless of the input stream
+ // implementation.
+ //
+ // The default value is false to avoid the performance and memory overhead of
+ // copying text for every token unless explicitly requested.
+ copyText bool
+}
+
+func NewCommonTokenFactory(copyText bool) *CommonTokenFactory {
+ return &CommonTokenFactory{copyText: copyText}
+}
+
+// CommonTokenFactoryDEFAULT is the default CommonTokenFactory. It does not
+// explicitly copy token text when constructing tokens.
+var CommonTokenFactoryDEFAULT = NewCommonTokenFactory(false)
+
+func (c *CommonTokenFactory) Create(source *TokenSourceCharStreamPair, ttype int, text string, channel, start, stop, line, column int) Token {
+ t := NewCommonToken(source, ttype, channel, start, stop)
+
+ t.line = line
+ t.column = column
+
+ if text != "" {
+ t.SetText(text)
+ } else if c.copyText && source.charStream != nil {
+ t.SetText(source.charStream.GetTextFromInterval(NewInterval(start, stop)))
+ }
+
+ return t
+}
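+
+// A usage sketch (editorial addition; src is a hypothetical
+// *TokenSourceCharStreamPair, and ttype and the offsets are illustrative):
+//
+//	tok := CommonTokenFactoryDEFAULT.Create(src, ttype, "", TokenDefaultChannel, 0, 4, 1, 0)
+//	fmt.Println(tok.GetText()) // text is served lazily from the char stream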
+
+func (c *CommonTokenFactory) createThin(ttype int, text string) Token {
+ t := NewCommonToken(nil, ttype, TokenDefaultChannel, -1, -1)
+ t.SetText(text)
+
+ return t
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/common_token_stream.go b/vendor/github.com/antlr4-go/antlr/v4/common_token_stream.go
new file mode 100644
index 000000000..b75da9df0
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/common_token_stream.go
@@ -0,0 +1,450 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "strconv"
+)
+
+// CommonTokenStream is an implementation of TokenStream that loads tokens from
+// a TokenSource on-demand and places the tokens in a buffer to provide access
+// to any previous token by index. This token stream ignores the value of
+// Token.getChannel. If your parser requires the token stream to filter tokens
+// down to only those on a particular channel, such as Token.DEFAULT_CHANNEL or
+// Token.HIDDEN_CHANNEL, use a filtering token stream such as CommonTokenStream.
+type CommonTokenStream struct {
+ channel int
+
+ // fetchedEOF indicates whether the Token.EOF token has been fetched from
+ // tokenSource and added to tokens. This field improves performance for the
+ // following cases:
+ //
+ // consume: The lookahead check in consume to prevent consuming the EOF symbol is
+ // optimized by checking the values of fetchedEOF and p instead of calling LA.
+ //
+ // fetch: The check to prevent adding multiple EOF symbols into tokens is
+ // trivial with this field.
+ fetchedEOF bool
+
+ // index into [tokens] of the current token (next token to consume).
+ // tokens[p] should be LT(1). It is set to -1 when the stream is first
+ // constructed or when SetTokenSource is called, indicating that the first token
+ // has not yet been fetched from the token source. For additional information,
+ // see the documentation of [IntStream] for a description of initializing methods.
+ index int
+
+ // tokenSource is the [TokenSource] from which tokens for this stream are
+ // fetched.
+ tokenSource TokenSource
+
+ // tokens contains all tokens fetched from the token source. The list is considered a
+ // complete view of the input once fetchedEOF is set to true.
+ tokens []Token
+}
+
+// NewCommonTokenStream creates a new CommonTokenStream instance using the supplied lexer to produce
+// tokens and will pull tokens from the given lexer channel.
+func NewCommonTokenStream(lexer Lexer, channel int) *CommonTokenStream {
+ return &CommonTokenStream{
+ channel: channel,
+ index: -1,
+ tokenSource: lexer,
+ tokens: make([]Token, 0),
+ }
+}
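+
+// A typical wiring of the stream, sketched under the assumption of an
+// ANTLR-generated lexer (NewMyLexer is hypothetical; everything else is this
+// package's API):
+//
+//	input := NewInputStream("1 + 2")
+//	lex := NewMyLexer(input) // hypothetical generated lexer
+//	tokens := NewCommonTokenStream(lex, TokenDefaultChannel)
+//	tokens.Fill()
+//	for _, t := range tokens.GetAllTokens() {
+//		fmt.Println(t.GetText())
+//	}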
+
+// GetAllTokens returns all tokens currently pulled from the token source.
+func (c *CommonTokenStream) GetAllTokens() []Token {
+ return c.tokens
+}
+
+func (c *CommonTokenStream) Mark() int {
+ return 0
+}
+
+func (c *CommonTokenStream) Release(_ int) {}
+
+func (c *CommonTokenStream) Reset() {
+ c.fetchedEOF = false
+ c.tokens = make([]Token, 0)
+ c.Seek(0)
+}
+
+func (c *CommonTokenStream) Seek(index int) {
+ c.lazyInit()
+ c.index = c.adjustSeekIndex(index)
+}
+
+func (c *CommonTokenStream) Get(index int) Token {
+ c.lazyInit()
+
+ return c.tokens[index]
+}
+
+func (c *CommonTokenStream) Consume() {
+ SkipEOFCheck := false
+
+ if c.index >= 0 {
+ if c.fetchedEOF {
+ // The last token in tokens is EOF. Skip the check if p indexes any
+ // fetched token except the last.
+ SkipEOFCheck = c.index < len(c.tokens)-1
+ } else {
+ // No EOF token in tokens. Skip the check if p indexes a fetched token.
+ SkipEOFCheck = c.index < len(c.tokens)
+ }
+ } else {
+ // Not yet initialized
+ SkipEOFCheck = false
+ }
+
+ if !SkipEOFCheck && c.LA(1) == TokenEOF {
+ panic("cannot consume EOF")
+ }
+
+ if c.Sync(c.index + 1) {
+ c.index = c.adjustSeekIndex(c.index + 1)
+ }
+}
+
+// Sync makes sure index i in tokens has a token, returning true if a token is
+// located at index i and false otherwise.
+func (c *CommonTokenStream) Sync(i int) bool {
+ n := i - len(c.tokens) + 1 // How many more elements do we need?
+
+ if n > 0 {
+ fetched := c.fetch(n)
+ return fetched >= n
+ }
+
+ return true
+}
+
+// fetch adds n elements to buffer and returns the actual number of elements
+// added to the buffer.
+func (c *CommonTokenStream) fetch(n int) int {
+ if c.fetchedEOF {
+ return 0
+ }
+
+ for i := 0; i < n; i++ {
+ t := c.tokenSource.NextToken()
+
+ t.SetTokenIndex(len(c.tokens))
+ c.tokens = append(c.tokens, t)
+
+ if t.GetTokenType() == TokenEOF {
+ c.fetchedEOF = true
+
+ return i + 1
+ }
+ }
+
+ return n
+}
+
+// GetTokens gets the tokens from start (inclusive) up to stop (exclusive), optionally filtered by type.
+func (c *CommonTokenStream) GetTokens(start int, stop int, types *IntervalSet) []Token {
+ if start < 0 || stop < 0 {
+ return nil
+ }
+
+ c.lazyInit()
+
+ subset := make([]Token, 0)
+
+ if stop >= len(c.tokens) {
+ stop = len(c.tokens) - 1
+ }
+
+ for i := start; i < stop; i++ {
+ t := c.tokens[i]
+
+ if t.GetTokenType() == TokenEOF {
+ break
+ }
+
+ if types == nil || types.contains(t.GetTokenType()) {
+ subset = append(subset, t)
+ }
+ }
+
+ return subset
+}
+
+func (c *CommonTokenStream) LA(i int) int {
+ return c.LT(i).GetTokenType()
+}
+
+func (c *CommonTokenStream) lazyInit() {
+ if c.index == -1 {
+ c.setup()
+ }
+}
+
+func (c *CommonTokenStream) setup() {
+ c.Sync(0)
+ c.index = c.adjustSeekIndex(0)
+}
+
+func (c *CommonTokenStream) GetTokenSource() TokenSource {
+ return c.tokenSource
+}
+
+// SetTokenSource resets the c token stream by setting its token source.
+func (c *CommonTokenStream) SetTokenSource(tokenSource TokenSource) {
+ c.tokenSource = tokenSource
+ c.tokens = make([]Token, 0)
+ c.index = -1
+ c.fetchedEOF = false
+}
+
+// NextTokenOnChannel returns the index of the next token on channel given a
+// starting index. Returns i if tokens[i] is on channel. Returns -1 if there are
+// no tokens on channel between 'i' and [TokenEOF].
+func (c *CommonTokenStream) NextTokenOnChannel(i, _ int) int {
+ c.Sync(i)
+
+ if i >= len(c.tokens) {
+ return -1
+ }
+
+ token := c.tokens[i]
+
+ for token.GetChannel() != c.channel {
+ if token.GetTokenType() == TokenEOF {
+ return -1
+ }
+
+ i++
+ c.Sync(i)
+ token = c.tokens[i]
+ }
+
+ return i
+}
+
+// previousTokenOnChannel returns the index of the previous token on channel
+// given a starting index. Returns i if tokens[i] is on channel. Returns -1 if
+// there are no tokens on channel between i and 0.
+func (c *CommonTokenStream) previousTokenOnChannel(i, channel int) int {
+ for i >= 0 && c.tokens[i].GetChannel() != channel {
+ i--
+ }
+
+ return i
+}
+
+// GetHiddenTokensToRight collects all tokens on a specified channel to the
+// right of the current token up until we see a token on DEFAULT_TOKEN_CHANNEL
+// or EOF. If channel is -1, it finds any non-default channel token.
+func (c *CommonTokenStream) GetHiddenTokensToRight(tokenIndex, channel int) []Token {
+ c.lazyInit()
+
+ if tokenIndex < 0 || tokenIndex >= len(c.tokens) {
+ panic(strconv.Itoa(tokenIndex) + " not in 0.." + strconv.Itoa(len(c.tokens)-1))
+ }
+
+ nextOnChannel := c.NextTokenOnChannel(tokenIndex+1, LexerDefaultTokenChannel)
+ from := tokenIndex + 1
+
+ // If no onChannel to the right, then nextOnChannel == -1, so set 'to' to the last token
+ var to int
+
+ if nextOnChannel == -1 {
+ to = len(c.tokens) - 1
+ } else {
+ to = nextOnChannel
+ }
+
+ return c.filterForChannel(from, to, channel)
+}
+
+// GetHiddenTokensToLeft collects all tokens on channel to the left of the
+// current token until we see a token on DEFAULT_TOKEN_CHANNEL. If channel is
+// -1, it finds any non default channel token.
+func (c *CommonTokenStream) GetHiddenTokensToLeft(tokenIndex, channel int) []Token {
+ c.lazyInit()
+
+ if tokenIndex < 0 || tokenIndex >= len(c.tokens) {
+ panic(strconv.Itoa(tokenIndex) + " not in 0.." + strconv.Itoa(len(c.tokens)-1))
+ }
+
+ prevOnChannel := c.previousTokenOnChannel(tokenIndex-1, LexerDefaultTokenChannel)
+
+ if prevOnChannel == tokenIndex-1 {
+ return nil
+ }
+
+ // If there are none on channel to the left and prevOnChannel == -1 then from = 0
+ from := prevOnChannel + 1
+ to := tokenIndex - 1
+
+ return c.filterForChannel(from, to, channel)
+}
+
+func (c *CommonTokenStream) filterForChannel(left, right, channel int) []Token {
+ hidden := make([]Token, 0)
+
+ for i := left; i < right+1; i++ {
+ t := c.tokens[i]
+
+ if channel == -1 {
+ if t.GetChannel() != LexerDefaultTokenChannel {
+ hidden = append(hidden, t)
+ }
+ } else if t.GetChannel() == channel {
+ hidden = append(hidden, t)
+ }
+ }
+
+ if len(hidden) == 0 {
+ return nil
+ }
+
+ return hidden
+}
+
+func (c *CommonTokenStream) GetSourceName() string {
+ return c.tokenSource.GetSourceName()
+}
+
+func (c *CommonTokenStream) Size() int {
+ return len(c.tokens)
+}
+
+func (c *CommonTokenStream) Index() int {
+ return c.index
+}
+
+func (c *CommonTokenStream) GetAllText() string {
+ c.Fill()
+ return c.GetTextFromInterval(NewInterval(0, len(c.tokens)-1))
+}
+
+func (c *CommonTokenStream) GetTextFromTokens(start, end Token) string {
+ if start == nil || end == nil {
+ return ""
+ }
+
+ return c.GetTextFromInterval(NewInterval(start.GetTokenIndex(), end.GetTokenIndex()))
+}
+
+func (c *CommonTokenStream) GetTextFromRuleContext(interval RuleContext) string {
+ return c.GetTextFromInterval(interval.GetSourceInterval())
+}
+
+func (c *CommonTokenStream) GetTextFromInterval(interval Interval) string {
+ c.lazyInit()
+ c.Sync(interval.Stop)
+
+ start := interval.Start
+ stop := interval.Stop
+
+ if start < 0 || stop < 0 {
+ return ""
+ }
+
+ if stop >= len(c.tokens) {
+ stop = len(c.tokens) - 1
+ }
+
+ s := ""
+
+ for i := start; i < stop+1; i++ {
+ t := c.tokens[i]
+
+ if t.GetTokenType() == TokenEOF {
+ break
+ }
+
+ s += t.GetText()
+ }
+
+ return s
+}
+
+// Fill gets all tokens from the lexer until EOF.
+func (c *CommonTokenStream) Fill() {
+ c.lazyInit()
+
+ for c.fetch(1000) == 1000 {
+ continue
+ }
+}
+
+func (c *CommonTokenStream) adjustSeekIndex(i int) int {
+ return c.NextTokenOnChannel(i, c.channel)
+}
+
+func (c *CommonTokenStream) LB(k int) Token {
+ if k == 0 || c.index-k < 0 {
+ return nil
+ }
+
+ i := c.index
+ n := 1
+
+ // Find k good tokens looking backward
+ for n <= k {
+ // Skip off-channel tokens
+ i = c.previousTokenOnChannel(i-1, c.channel)
+ n++
+ }
+
+ if i < 0 {
+ return nil
+ }
+
+ return c.tokens[i]
+}
+
+func (c *CommonTokenStream) LT(k int) Token {
+ c.lazyInit()
+
+ if k == 0 {
+ return nil
+ }
+
+ if k < 0 {
+ return c.LB(-k)
+ }
+
+ i := c.index
+ n := 1 // We know tokens[n] is valid
+
+ // Find k good tokens
+ for n < k {
+ // Skip off-channel tokens, but make sure to not look past EOF
+ if c.Sync(i + 1) {
+ i = c.NextTokenOnChannel(i+1, c.channel)
+ }
+
+ n++
+ }
+
+ return c.tokens[i]
+}
+
+// getNumberOfOnChannelTokens counts EOF once.
+func (c *CommonTokenStream) getNumberOfOnChannelTokens() int {
+ var n int
+
+ c.Fill()
+
+ for i := 0; i < len(c.tokens); i++ {
+ t := c.tokens[i]
+
+ if t.GetChannel() == c.channel {
+ n++
+ }
+
+ if t.GetTokenType() == TokenEOF {
+ break
+ }
+ }
+
+ return n
+}
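+
+// A sketch of channel-aware retrieval: after lexing, recover the whitespace or
+// comment tokens the parser never saw (assumes the grammar routes them to
+// TokenHiddenChannel; tokens and tok are hypothetical variables):
+//
+//	hidden := tokens.GetHiddenTokensToRight(tok.GetTokenIndex(), TokenHiddenChannel)
+//	for _, h := range hidden {
+//		fmt.Printf("hidden after token: %q\n", h.GetText())
+//	}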
diff --git a/vendor/github.com/antlr4-go/antlr/v4/comparators.go b/vendor/github.com/antlr4-go/antlr/v4/comparators.go
new file mode 100644
index 000000000..7467e9b43
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/comparators.go
@@ -0,0 +1,150 @@
+package antlr
+
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+// This file contains all the implementations of custom comparators used for generic collections when the
+// Hash() and Equals() funcs supplied by the struct objects themselves need to be overridden. Normally, we would
+// put the comparators in the source files for the structs themselves, but given that the organization of this code is
+// sorta kinda based upon the Java code, I found it confusing trying to find out which comparator was where and used by
+// which instantiation of a collection. For instance, an Array2DHashSet in the Java source, when used with ATNConfig
+// collections, requires three different comparators depending on what the collection is being used for. Collecting - pun intended -
+// all the comparators here makes it much easier to see which implementation of hash and equals is used by which collection.
+// It also makes it easy to verify that the Hash() and Equals() functions marry up with the Java implementations.
+
+// ObjEqComparator is the equivalent of the Java ObjectEqualityComparator, which is the default instance of
+// Equality comparator. We do not have inheritance in Go, only interfaces, so we use generics to enforce some
+// type safety and avoid having to implement this for every type that we want to perform comparison on.
+//
+// This comparator works by using the standard Hash() and Equals() methods of the type T that is being compared, which
+// allows us to use it in any collection instance that does not require a special hash or equals implementation.
+type ObjEqComparator[T Collectable[T]] struct{}
+
+var (
+ aStateEqInst = &ObjEqComparator[ATNState]{}
+ aConfEqInst = &ObjEqComparator[*ATNConfig]{}
+
+ // aConfCompInst is the comparator used for the ATNConfigSet for the configLookup cache
+ aConfCompInst = &ATNConfigComparator[*ATNConfig]{}
+ atnConfCompInst = &BaseATNConfigComparator[*ATNConfig]{}
+ dfaStateEqInst = &ObjEqComparator[*DFAState]{}
+ semctxEqInst = &ObjEqComparator[SemanticContext]{}
+ atnAltCfgEqInst = &ATNAltConfigComparator[*ATNConfig]{}
+ pContextEqInst = &ObjEqComparator[*PredictionContext]{}
+)
+
+// Equals2 delegates to the Equals() method of type T
+func (c *ObjEqComparator[T]) Equals2(o1, o2 T) bool {
+ return o1.Equals(o2)
+}
+
+// Hash1 delegates to the Hash() method of type T
+func (c *ObjEqComparator[T]) Hash1(o T) int {
+
+ return o.Hash()
+}
+
+type SemCComparator[T Collectable[T]] struct{}
+
+// ATNConfigComparator is used as the comparator for the configLookup field of an ATNConfigSet
+// and has a custom Equals() and Hash() implementation, because equality is not based on the
+// standard Hash() and Equals() methods of the ATNConfig type.
+type ATNConfigComparator[T Collectable[T]] struct {
+}
+
+// Equals2 is a custom comparator for ATNConfigs specifically for configLookup
+func (c *ATNConfigComparator[T]) Equals2(o1, o2 *ATNConfig) bool {
+ // Same pointer, must be equal, even if both nil
+ if o1 == o2 {
+ return true
+ }
+
+ // If either is nil, but not both, then the result is false
+ if o1 == nil || o2 == nil {
+ return false
+ }
+
+ return o1.GetState().GetStateNumber() == o2.GetState().GetStateNumber() &&
+ o1.GetAlt() == o2.GetAlt() &&
+ o1.GetSemanticContext().Equals(o2.GetSemanticContext())
+}
+
+// Hash1 is a custom hash implementation for ATNConfigs, specifically for configLookup
+func (c *ATNConfigComparator[T]) Hash1(o *ATNConfig) int {
+ hash := 7
+ hash = 31*hash + o.GetState().GetStateNumber()
+ hash = 31*hash + o.GetAlt()
+ hash = 31*hash + o.GetSemanticContext().Hash()
+ return hash
+}
+
+// ATNAltConfigComparator is used as the comparator for mapping configs to Alt Bitsets
+type ATNAltConfigComparator[T Collectable[T]] struct {
+}
+
+// Equals2 is a custom comparator for ATNConfigs specifically for configLookup
+func (c *ATNAltConfigComparator[T]) Equals2(o1, o2 *ATNConfig) bool {
+ // Same pointer, must be equal, even if both nil
+ if o1 == o2 {
+ return true
+ }
+
+ // If either is nil, but not both, then the result is false
+ if o1 == nil || o2 == nil {
+ return false
+ }
+
+ return o1.GetState().GetStateNumber() == o2.GetState().GetStateNumber() &&
+ o1.GetContext().Equals(o2.GetContext())
+}
+
+// Hash1 is a custom hash implementation for ATNConfigs, specifically for configLookup
+func (c *ATNAltConfigComparator[T]) Hash1(o *ATNConfig) int {
+ h := murmurInit(7)
+ h = murmurUpdate(h, o.GetState().GetStateNumber())
+ h = murmurUpdate(h, o.GetContext().Hash())
+ return murmurFinish(h, 2)
+}
+
+// BaseATNConfigComparator is used as the comparator for the configLookup field of a ATNConfigSet
+// and has a custom Equals() and Hash() implementation, because equality is not based on the
+// standard Hash() and Equals() methods of the ATNConfig type.
+type BaseATNConfigComparator[T Collectable[T]] struct {
+}
+
+// Equals2 is a custom comparator for ATNConfigs specifically for baseATNConfigSet
+func (c *BaseATNConfigComparator[T]) Equals2(o1, o2 *ATNConfig) bool {
+ // Same pointer, must be equal, even if both nil
+ if o1 == o2 {
+ return true
+ }
+
+ // If either is nil, but not both, then the result is false
+ if o1 == nil || o2 == nil {
+ return false
+ }
+
+ return o1.GetState().GetStateNumber() == o2.GetState().GetStateNumber() &&
+ o1.GetAlt() == o2.GetAlt() &&
+ o1.GetSemanticContext().Equals(o2.GetSemanticContext())
+}
+
+// Hash1 is a custom hash implementation for ATNConfigs, specifically for configLookup, but in fact just
+// delegates to the standard Hash() method of the ATNConfig type.
+func (c *BaseATNConfigComparator[T]) Hash1(o *ATNConfig) int {
+ return o.Hash()
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/configuration.go b/vendor/github.com/antlr4-go/antlr/v4/configuration.go
new file mode 100644
index 000000000..c2b724514
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/configuration.go
@@ -0,0 +1,214 @@
+package antlr
+
+type runtimeConfiguration struct {
+ statsTraceStacks bool
+ lexerATNSimulatorDebug bool
+ lexerATNSimulatorDFADebug bool
+ parserATNSimulatorDebug bool
+ parserATNSimulatorTraceATNSim bool
+ parserATNSimulatorDFADebug bool
+ parserATNSimulatorRetryDebug bool
+ lRLoopEntryBranchOpt bool
+ memoryManager bool
+}
+
+// Global runtime configuration
+var runtimeConfig = runtimeConfiguration{
+ lRLoopEntryBranchOpt: true,
+}
+
+type runtimeOption func(*runtimeConfiguration) error
+
+// ConfigureRuntime allows the runtime to be configured globally setting things like trace and statistics options.
+// It uses the functional options pattern for go. This is a package global function as it operates on the runtime
+// configuration regardless of the instantiation of anything higher up such as a parser or lexer. Generally this is
+// used for debugging/tracing/statistics options, which are usually used by the runtime maintainers (or rather the
+// only maintainer). However, it is possible that you might want to use this to set a global option concerning the
+// memory allocation type used by the runtime such as sync.Pool or not.
+//
+// The options are applied in the order they are passed in, so the last option will override any previous options.
+//
+// For example, if you want to turn on the collection create point stack flag to true, you can do:
+//
+// antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(true))
+//
+// If you want to turn it off, you can do:
+//
+// antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(false))
+func ConfigureRuntime(options ...runtimeOption) error {
+ for _, option := range options {
+ err := option(&runtimeConfig)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// WithStatsTraceStacks sets the global flag indicating whether to collect stack traces at the create-point of
+// certain structs, such as collections, or the use point of certain methods such as Put().
+// Because this can be expensive, it is turned off by default. However, it
+// can be useful to track down exactly where memory is being created and used.
+//
+// Use:
+//
+// antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(true))
+//
+// You can turn it off at any time using:
+//
+// antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(false))
+func WithStatsTraceStacks(trace bool) runtimeOption {
+ return func(config *runtimeConfiguration) error {
+ config.statsTraceStacks = trace
+ return nil
+ }
+}
+
+// WithLexerATNSimulatorDebug sets the global flag indicating whether to log debug information from the lexer [ATN]
+// simulator. This is useful for debugging lexer issues by comparing the output with the Java runtime. Only useful
+// to the runtime maintainers.
+//
+// Use:
+//
+// antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDebug(true))
+//
+// You can turn it off at any time using:
+//
+// antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDebug(false))
+func WithLexerATNSimulatorDebug(debug bool) runtimeOption {
+ return func(config *runtimeConfiguration) error {
+ config.lexerATNSimulatorDebug = debug
+ return nil
+ }
+}
+
+// WithLexerATNSimulatorDFADebug sets the global flag indicating whether to log debug information from the lexer [ATN] [DFA]
+// simulator. This is useful for debugging lexer issues by comparing the output with the Java runtime. Only useful
+// to the runtime maintainers.
+//
+// Use:
+//
+// antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDFADebug(true))
+//
+// You can turn it off at any time using:
+//
+// antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDFADebug(false))
+func WithLexerATNSimulatorDFADebug(debug bool) runtimeOption {
+ return func(config *runtimeConfiguration) error {
+ config.lexerATNSimulatorDFADebug = debug
+ return nil
+ }
+}
+
+// WithParserATNSimulatorDebug sets the global flag indicating whether to log debug information from the parser [ATN]
+// simulator. This is useful for debugging parser issues by comparing the output with the Java runtime. Only useful
+// to the runtime maintainers.
+//
+// Use:
+//
+// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDebug(true))
+//
+// You can turn it off at any time using:
+//
+// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDebug(false))
+func WithParserATNSimulatorDebug(debug bool) runtimeOption {
+ return func(config *runtimeConfiguration) error {
+ config.parserATNSimulatorDebug = debug
+ return nil
+ }
+}
+
+// WithParserATNSimulatorTraceATNSim sets the global flag indicating whether to log trace information from the parser [ATN] simulator
+// [DFA]. This is useful for debugging parser issues by comparing the output with the Java runtime. Only useful
+// to the runtime maintainers.
+//
+// Use:
+//
+// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorTraceATNSim(true))
+//
+// You can turn it off at any time using:
+//
+// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorTraceATNSim(false))
+func WithParserATNSimulatorTraceATNSim(trace bool) runtimeOption {
+ return func(config *runtimeConfiguration) error {
+ config.parserATNSimulatorTraceATNSim = trace
+ return nil
+ }
+}
+
+// WithParserATNSimulatorDFADebug sets the global flag indicating whether to log debug information from the parser [ATN] [DFA]
+// simulator. This is useful for debugging parser issues by comparing the output with the Java runtime. Only useful
+// to the runtime maintainers.
+//
+// Use:
+//
+// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDFADebug(true))
+//
+// You can turn it off at any time using:
+//
+// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDFADebug(false))
+func WithParserATNSimulatorDFADebug(debug bool) runtimeOption {
+ return func(config *runtimeConfiguration) error {
+ config.parserATNSimulatorDFADebug = debug
+ return nil
+ }
+}
+
+// WithParserATNSimulatorRetryDebug sets the global flag indicating whether to log debug information from the parser [ATN] [DFA]
+// simulator when retrying a decision. This is useful for debugging parser issues by comparing the output with the Java runtime.
+// Only useful to the runtime maintainers.
+//
+// Use:
+//
+// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorRetryDebug(true))
+//
+// You can turn it off at any time using:
+//
+// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorRetryDebug(false))
+func WithParserATNSimulatorRetryDebug(debug bool) runtimeOption {
+ return func(config *runtimeConfiguration) error {
+ config.parserATNSimulatorRetryDebug = debug
+ return nil
+ }
+}
+
+// WithLRLoopEntryBranchOpt sets the global flag indicating whether left-recursive loop operations should be
+// optimized or not. This is useful for debugging parser issues by comparing the output with the Java runtime.
+// It turns off the functionality of [canDropLoopEntryEdgeInLeftRecursiveRule] in [ParserATNSimulator].
+//
+// Note that the default is to use this optimization.
+//
+// Use:
+//
+// antlr.ConfigureRuntime(antlr.WithLRLoopEntryBranchOpt(true))
+//
+// You can turn it off at any time using:
+//
+// antlr.ConfigureRuntime(antlr.WithLRLoopEntryBranchOpt(false))
+func WithLRLoopEntryBranchOpt(off bool) runtimeOption {
+ return func(config *runtimeConfiguration) error {
+ config.lRLoopEntryBranchOpt = off
+ return nil
+ }
+}
+
+// WithMemoryManager sets the global flag indicating whether to use the memory manager or not. This is useful
+// for poorly constructed grammars that create a lot of garbage. It turns on the functionality of [memoryManager], which
+// will intercept garbage collection and cause available memory to be reused. At the end of the day, this is no substitute
+// for fixing your grammar by ridding yourself of extreme ambiguity. But if you are just trying to reuse an open-source
+// grammar, this may help make it more practical.
+//
+// Note that the default is to use normal Go memory allocation and not pool memory.
+//
+// Use:
+//
+// antlr.ConfigureRuntime(antlr.WithMemoryManager(true))
+//
+// Note that if you turn this on, you should probably leave it on. You should use only one memory strategy or the other
+// and should remember to nil out any references to the parser or lexer when you are done with them.
+func WithMemoryManager(use bool) runtimeOption {
+ return func(config *runtimeConfiguration) error {
+ config.memoryManager = use
+ return nil
+ }
+}
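+
+// Because the options compose and apply in order, several flags can be set in
+// one call; a short sketch using only the options defined above:
+//
+//	_ = ConfigureRuntime(
+//		WithLexerATNSimulatorDebug(true),
+//		WithParserATNSimulatorDebug(true),
+//		WithStatsTraceStacks(false),
+//	)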
diff --git a/vendor/github.com/antlr4-go/antlr/v4/dfa.go b/vendor/github.com/antlr4-go/antlr/v4/dfa.go
new file mode 100644
index 000000000..6b63eb158
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/dfa.go
@@ -0,0 +1,175 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+// DFA represents the Deterministic Finite Automaton used by the recognizer, including all the states it can
+// reach and the transitions between them.
+type DFA struct {
+ // atnStartState is the ATN state in which this was created
+ atnStartState DecisionState
+
+ decision int
+
+ // states is all the DFA states. Use Map to get the old state back; Set can only
+ // indicate whether it is there. Go maps handle key hash collisions and so on and are very
+ // good, but the DFAState is an object and can't be used directly as the key as it can in, say, Java
+ // and C#, whereby if the hashcode is the same for two objects, then Equals() is called against them
+ // to see if they really are the same object. Hence, we have our own map storage.
+ //
+ states *JStore[*DFAState, *ObjEqComparator[*DFAState]]
+
+ numstates int
+
+ s0 *DFAState
+
+ // precedenceDfa is the backing field for isPrecedenceDfa and setPrecedenceDfa.
+ // True if the DFA is for a precedence decision and false otherwise.
+ precedenceDfa bool
+}
+
+func NewDFA(atnStartState DecisionState, decision int) *DFA {
+ dfa := &DFA{
+ atnStartState: atnStartState,
+ decision: decision,
+ states: nil, // Lazy initialize
+ }
+ if s, ok := atnStartState.(*StarLoopEntryState); ok && s.precedenceRuleDecision {
+ dfa.precedenceDfa = true
+ dfa.s0 = NewDFAState(-1, NewATNConfigSet(false))
+ dfa.s0.isAcceptState = false
+ dfa.s0.requiresFullContext = false
+ }
+ return dfa
+}
+
+// getPrecedenceStartState gets the start state for the current precedence and
+// returns the start state corresponding to the specified precedence if a start
+// state exists for the specified precedence and nil otherwise. d must be a
+// precedence DFA. See also isPrecedenceDfa.
+func (d *DFA) getPrecedenceStartState(precedence int) *DFAState {
+ if !d.getPrecedenceDfa() {
+ panic("only precedence DFAs may contain a precedence start state")
+ }
+
+ // s0.edges is never nil for a precedence DFA
+ if precedence < 0 || precedence >= len(d.getS0().getEdges()) {
+ return nil
+ }
+
+ return d.getS0().getIthEdge(precedence)
+}
+
+// setPrecedenceStartState sets the start state for the current precedence. d
+// must be a precedence DFA. See also isPrecedenceDfa.
+func (d *DFA) setPrecedenceStartState(precedence int, startState *DFAState) {
+ if !d.getPrecedenceDfa() {
+ panic("only precedence DFAs may contain a precedence start state")
+ }
+
+ if precedence < 0 {
+ return
+ }
+
+ // Synchronization on s0 here is ok. When the DFA is turned into a
+ // precedence DFA, s0 will be initialized once and not updated again. s0.edges
+ // is never nil for a precedence DFA.
+ s0 := d.getS0()
+ if precedence >= s0.numEdges() {
+ edges := append(s0.getEdges(), make([]*DFAState, precedence+1-s0.numEdges())...)
+ s0.setEdges(edges)
+ d.setS0(s0)
+ }
+
+ s0.setIthEdge(precedence, startState)
+}
+
+func (d *DFA) getPrecedenceDfa() bool {
+ return d.precedenceDfa
+}
+
+// setPrecedenceDfa sets whether d is a precedence DFA. If precedenceDfa differs
+// from the current DFA configuration, then d.states is cleared, the initial
+// state s0 is set to a new DFAState with an empty outgoing DFAState.edges to
+// store the start states for individual precedence values if precedenceDfa is
+// true or nil otherwise, and d.precedenceDfa is updated.
+func (d *DFA) setPrecedenceDfa(precedenceDfa bool) {
+ if d.getPrecedenceDfa() != precedenceDfa {
+ d.states = nil // Lazy initialize
+ d.numstates = 0
+
+ if precedenceDfa {
+ precedenceState := NewDFAState(-1, NewATNConfigSet(false))
+ precedenceState.setEdges(make([]*DFAState, 0))
+ precedenceState.isAcceptState = false
+ precedenceState.requiresFullContext = false
+ d.setS0(precedenceState)
+ } else {
+ d.setS0(nil)
+ }
+
+ d.precedenceDfa = precedenceDfa
+ }
+}
+
+// Len returns the number of states in d. We use this instead of accessing states directly so that we can implement lazy
+// instantiation of the states JStore.
+func (d *DFA) Len() int {
+ if d.states == nil {
+ return 0
+ }
+ return d.states.Len()
+}
+
+// Get returns a state that matches s if it is present in the DFA state set. We defer to this
+// function instead of accessing states directly so that we can implement lazy instantiation of the states JStore.
+func (d *DFA) Get(s *DFAState) (*DFAState, bool) {
+ if d.states == nil {
+ return nil, false
+ }
+ return d.states.Get(s)
+}
+
+func (d *DFA) Put(s *DFAState) (*DFAState, bool) {
+ if d.states == nil {
+ d.states = NewJStore[*DFAState, *ObjEqComparator[*DFAState]](dfaStateEqInst, DFAStateCollection, "DFA via DFA.Put")
+ }
+ return d.states.Put(s)
+}
+
+func (d *DFA) getS0() *DFAState {
+ return d.s0
+}
+
+func (d *DFA) setS0(s *DFAState) {
+ d.s0 = s
+}
+
+// sortedStates returns the states in d sorted by their state number, or an empty slice if d.states is nil.
+func (d *DFA) sortedStates() []*DFAState {
+ if d.states == nil {
+ return []*DFAState{}
+ }
+ vs := d.states.SortedSlice(func(i, j *DFAState) bool {
+ return i.stateNumber < j.stateNumber
+ })
+
+ return vs
+}
+
+func (d *DFA) String(literalNames []string, symbolicNames []string) string {
+ if d.getS0() == nil {
+ return ""
+ }
+
+ return NewDFASerializer(d, literalNames, symbolicNames).String()
+}
+
+func (d *DFA) ToLexerString() string {
+ if d.getS0() == nil {
+ return ""
+ }
+
+ return NewLexerDFASerializer(d).String()
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/dfa_serializer.go b/vendor/github.com/antlr4-go/antlr/v4/dfa_serializer.go
new file mode 100644
index 000000000..0e1100989
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/dfa_serializer.go
@@ -0,0 +1,158 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// DFASerializer is a DFA walker that knows how to dump the DFA states to serialized
+// strings.
+type DFASerializer struct {
+ dfa *DFA
+ literalNames []string
+ symbolicNames []string
+}
+
+func NewDFASerializer(dfa *DFA, literalNames, symbolicNames []string) *DFASerializer {
+ if literalNames == nil {
+ literalNames = make([]string, 0)
+ }
+
+ if symbolicNames == nil {
+ symbolicNames = make([]string, 0)
+ }
+
+ return &DFASerializer{
+ dfa: dfa,
+ literalNames: literalNames,
+ symbolicNames: symbolicNames,
+ }
+}
+
+func (d *DFASerializer) String() string {
+ if d.dfa.getS0() == nil {
+ return ""
+ }
+
+ buf := ""
+ states := d.dfa.sortedStates()
+
+ for _, s := range states {
+ if s.edges != nil {
+ n := len(s.edges)
+
+ for j := 0; j < n; j++ {
+ t := s.edges[j]
+
+ if t != nil && t.stateNumber != 0x7FFFFFFF {
+ buf += d.GetStateString(s)
+ buf += "-"
+ buf += d.getEdgeLabel(j)
+ buf += "->"
+ buf += d.GetStateString(t)
+ buf += "\n"
+ }
+ }
+ }
+ }
+
+ if len(buf) == 0 {
+ return ""
+ }
+
+ return buf
+}
+
+func (d *DFASerializer) getEdgeLabel(i int) string {
+ if i == 0 {
+ return "EOF"
+ } else if d.literalNames != nil && i-1 < len(d.literalNames) {
+ return d.literalNames[i-1]
+ } else if d.symbolicNames != nil && i-1 < len(d.symbolicNames) {
+ return d.symbolicNames[i-1]
+ }
+
+ return strconv.Itoa(i - 1)
+}
+
+func (d *DFASerializer) GetStateString(s *DFAState) string {
+ var a, b string
+
+ if s.isAcceptState {
+ a = ":"
+ }
+
+ if s.requiresFullContext {
+ b = "^"
+ }
+
+ baseStateStr := a + "s" + strconv.Itoa(s.stateNumber) + b
+
+ if s.isAcceptState {
+ if s.predicates != nil {
+ return baseStateStr + "=>" + fmt.Sprint(s.predicates)
+ }
+
+ return baseStateStr + "=>" + fmt.Sprint(s.prediction)
+ }
+
+ return baseStateStr
+}
+
+type LexerDFASerializer struct {
+ *DFASerializer
+}
+
+func NewLexerDFASerializer(dfa *DFA) *LexerDFASerializer {
+ return &LexerDFASerializer{DFASerializer: NewDFASerializer(dfa, nil, nil)}
+}
+
+func (l *LexerDFASerializer) getEdgeLabel(i int) string {
+ var sb strings.Builder
+ sb.Grow(6)
+ sb.WriteByte('\'')
+ sb.WriteRune(rune(i))
+ sb.WriteByte('\'')
+ return sb.String()
+}
+
+func (l *LexerDFASerializer) String() string {
+ if l.dfa.getS0() == nil {
+ return ""
+ }
+
+ buf := ""
+ states := l.dfa.sortedStates()
+
+ for i := 0; i < len(states); i++ {
+ s := states[i]
+
+ if s.edges != nil {
+ n := len(s.edges)
+
+ for j := 0; j < n; j++ {
+ t := s.edges[j]
+
+ if t != nil && t.stateNumber != 0x7FFFFFFF {
+ buf += l.GetStateString(s)
+ buf += "-"
+ buf += l.getEdgeLabel(j)
+ buf += "->"
+ buf += l.GetStateString(t)
+ buf += "\n"
+ }
+ }
+ }
+ }
+
+ if len(buf) == 0 {
+ return ""
+ }
+
+ return buf
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/dfa_state.go b/vendor/github.com/antlr4-go/antlr/v4/dfa_state.go
new file mode 100644
index 000000000..654143074
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/dfa_state.go
@@ -0,0 +1,170 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+)
+
+// PredPrediction maps a predicate to a predicted alternative.
+type PredPrediction struct {
+ alt int
+ pred SemanticContext
+}
+
+func NewPredPrediction(pred SemanticContext, alt int) *PredPrediction {
+ return &PredPrediction{alt: alt, pred: pred}
+}
+
+func (p *PredPrediction) String() string {
+ return "(" + fmt.Sprint(p.pred) + ", " + fmt.Sprint(p.alt) + ")"
+}
+
+// DFAState represents a set of possible [ATN] configurations. As Aho, Sethi,
+// Ullman p. 117 says: "The DFA uses its state to keep track of all possible
+// states the ATN can be in after reading each input symbol. That is to say,
+// after reading input a1, a2,..an, the DFA is in a state that represents the
+// subset T of the states of the ATN that are reachable from the ATN's start
+// state along some path labeled a1a2..an."
+//
+// In conventional NFA-to-DFA conversion, therefore, the subset T would be a bitset representing the set of
+// states the [ATN] could be in. We need to track the alt predicted by each state
+// as well, however. More importantly, we need to maintain a stack of states,
+// tracking the closure operations as they jump from rule to rule, emulating
+// rule invocations (method calls). I have to add a stack to simulate the proper
+// lookahead sequences for the underlying LL grammar from which the ATN was
+// derived.
+//
+// I use a set of [ATNConfig] objects, not simple states. An [ATNConfig] is both a
+// state (ala normal conversion) and a [RuleContext] describing the chain of rules
+// (if any) followed to arrive at that state.
+//
+// A [DFAState] may have multiple references to a particular state, but with
+// different [ATN] contexts (with same or different alts) meaning that state was
+// reached via a different set of rule invocations.
+type DFAState struct {
+ stateNumber int
+ configs *ATNConfigSet
+
+ // edges elements point to the target of the symbol. Shift up by 1 so (-1)
+ // Token.EOF maps to the first element.
+ edges []*DFAState
+
+ isAcceptState bool
+
+ // prediction is the 'ttype' we match or alt we predict if the state is 'accept'.
+ // Set to ATN.INVALID_ALT_NUMBER when predicates != nil or
+ // requiresFullContext.
+ prediction int
+
+ lexerActionExecutor *LexerActionExecutor
+
+ // requiresFullContext indicates it was created during an SLL prediction that
+ // discovered a conflict between the configurations in the state. Future
+ // ParserATNSimulator.execATN invocations immediately jump doing
+ // full context prediction if true.
+ requiresFullContext bool
+
+ // predicates is the predicates associated with the ATN configurations of the
+ // DFA state during SLL parsing. When we have predicates, requiresFullContext
+ // is false, since full context prediction evaluates predicates on-the-fly. If
+ // this field is not nil, then prediction is ATN.INVALID_ALT_NUMBER.
+ //
+ // We only use these for non-requiresFullContext but conflicting states. That
+ // means we know from the context (it's $ or we don't dip into outer context)
+ // that it's an ambiguity not a conflict.
+ //
+ // This list is computed by
+ // ParserATNSimulator.predicateDFAState.
+ predicates []*PredPrediction
+}
+
+func NewDFAState(stateNumber int, configs *ATNConfigSet) *DFAState {
+ if configs == nil {
+ configs = NewATNConfigSet(false)
+ }
+
+ return &DFAState{configs: configs, stateNumber: stateNumber}
+}
+
+// GetAltSet gets the set of all alts mentioned by all ATN configurations in d.
+func (d *DFAState) GetAltSet() []int {
+ var alts []int
+
+ if d.configs != nil {
+ for _, c := range d.configs.configs {
+ alts = append(alts, c.GetAlt())
+ }
+ }
+
+ if len(alts) == 0 {
+ return nil
+ }
+
+ return alts
+}
+
+func (d *DFAState) getEdges() []*DFAState {
+ return d.edges
+}
+
+func (d *DFAState) numEdges() int {
+ return len(d.edges)
+}
+
+func (d *DFAState) getIthEdge(i int) *DFAState {
+ return d.edges[i]
+}
+
+func (d *DFAState) setEdges(newEdges []*DFAState) {
+ d.edges = newEdges
+}
+
+func (d *DFAState) setIthEdge(i int, edge *DFAState) {
+ d.edges[i] = edge
+}
+
+func (d *DFAState) setPrediction(v int) {
+ d.prediction = v
+}
+
+func (d *DFAState) String() string {
+ var s string
+ if d.isAcceptState {
+ if d.predicates != nil {
+ s = "=>" + fmt.Sprint(d.predicates)
+ } else {
+ s = "=>" + fmt.Sprint(d.prediction)
+ }
+ }
+
+ return fmt.Sprintf("%d:%s%s", d.stateNumber, fmt.Sprint(d.configs), s)
+}
+
+func (d *DFAState) Hash() int {
+ h := murmurInit(7)
+ h = murmurUpdate(h, d.configs.Hash())
+ return murmurFinish(h, 1)
+}
+
+// Equals returns whether d equals other. Two DFAStates are equal if their ATN
+// configuration sets are the same. This method is used to see if a state
+// already exists.
+//
+// Because the number of alternatives and number of ATN configurations are
+// finite, there is a finite number of DFA states that can be processed. This is
+// necessary to show that the algorithm terminates.
+//
+// Cannot test the DFA state numbers here because in
+// ParserATNSimulator.addDFAState we need to know if any other state exists that
+// has this exact set of ATN configurations. The stateNumber is irrelevant.
+func (d *DFAState) Equals(o Collectable[*DFAState]) bool {
+ if d == o {
+ return true
+ }
+
+ return d.configs.Equals(o.(*DFAState).configs)
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/diagnostic_error_listener.go b/vendor/github.com/antlr4-go/antlr/v4/diagnostic_error_listener.go
new file mode 100644
index 000000000..bd2cd8bc3
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/diagnostic_error_listener.go
@@ -0,0 +1,110 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "strconv"
+)
+
+// DiagnosticErrorListener is an implementation of [ErrorListener] that can be used to identify
+// certain potential correctness and performance problems in grammars. Reports
+// are made by calling [Parser].NotifyErrorListeners with the appropriate message:
+//
+//   - Ambiguities: These are cases where more than one path through the
+//     grammar can Match the input.
+//
+//   - Weak context sensitivity: These are cases where full-context
+//     prediction resolved an SLL conflict to a unique alternative which equaled the
+//     minimum alternative of the SLL conflict.
+//
+//   - Strong (forced) context sensitivity: These are cases where the
+//     full-context prediction resolved an SLL conflict to a unique alternative,
+//     and the minimum alternative of the SLL conflict was found to not be
+//     a truly viable alternative. Two-stage parsing cannot be used for inputs where
+//     this situation occurs.
+type DiagnosticErrorListener struct {
+ *DefaultErrorListener
+
+ exactOnly bool
+}
+
+//goland:noinspection GoUnusedExportedFunction
+func NewDiagnosticErrorListener(exactOnly bool) *DiagnosticErrorListener {
+
+ n := new(DiagnosticErrorListener)
+
+ // whether all ambiguities or only exact ambiguities are Reported.
+ n.exactOnly = exactOnly
+ return n
+}
+
+func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs *ATNConfigSet) {
+ if d.exactOnly && !exact {
+ return
+ }
+ msg := "reportAmbiguity d=" +
+ d.getDecisionDescription(recognizer, dfa) +
+ ": ambigAlts=" +
+ d.getConflictingAlts(ambigAlts, configs).String() +
+ ", input='" +
+ recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'"
+ recognizer.NotifyErrorListeners(msg, nil, nil)
+}
+
+func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, _ *BitSet, _ *ATNConfigSet) {
+
+ msg := "reportAttemptingFullContext d=" +
+ d.getDecisionDescription(recognizer, dfa) +
+ ", input='" +
+ recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'"
+ recognizer.NotifyErrorListeners(msg, nil, nil)
+}
+
+func (d *DiagnosticErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, _ int, _ *ATNConfigSet) {
+ msg := "reportContextSensitivity d=" +
+ d.getDecisionDescription(recognizer, dfa) +
+ ", input='" +
+ recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'"
+ recognizer.NotifyErrorListeners(msg, nil, nil)
+}
+
+func (d *DiagnosticErrorListener) getDecisionDescription(recognizer Parser, dfa *DFA) string {
+ decision := dfa.decision
+ ruleIndex := dfa.atnStartState.GetRuleIndex()
+
+ ruleNames := recognizer.GetRuleNames()
+ if ruleIndex < 0 || ruleIndex >= len(ruleNames) {
+ return strconv.Itoa(decision)
+ }
+ ruleName := ruleNames[ruleIndex]
+ if ruleName == "" {
+ return strconv.Itoa(decision)
+ }
+ return strconv.Itoa(decision) + " (" + ruleName + ")"
+}
+
+// getConflictingAlts computes the set of conflicting or ambiguous alternatives from a
+// configuration set, if that information was not already provided by the
+// parser. It returns ReportedAlts if it is not nil, otherwise it returns the
+// set of alternatives represented in configs.
+func (d *DiagnosticErrorListener) getConflictingAlts(ReportedAlts *BitSet, set *ATNConfigSet) *BitSet {
+ if ReportedAlts != nil {
+ return ReportedAlts
+ }
+ result := NewBitSet()
+ for _, c := range set.configs {
+ result.add(c.GetAlt())
+ }
+
+ return result
+}
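+
+// A usage sketch: attach the listener to a generated parser during grammar
+// development (p is a hypothetical generated parser; AddErrorListener and
+// RemoveErrorListeners come from the embedded BaseRecognizer):
+//
+//	p.RemoveErrorListeners()
+//	p.AddErrorListener(NewDiagnosticErrorListener(true)) // report exact ambiguities only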
diff --git a/vendor/github.com/antlr4-go/antlr/v4/error_listener.go b/vendor/github.com/antlr4-go/antlr/v4/error_listener.go
new file mode 100644
index 000000000..21a021643
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/error_listener.go
@@ -0,0 +1,100 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "os"
+ "strconv"
+)
+
+// ErrorListener defines methods for listening to recognition errors and
+// prediction diagnostics. DefaultErrorListener provides an empty default
+// implementation: each method does nothing, but can be overridden as necessary.
+
+type ErrorListener interface {
+ SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException)
+ ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs *ATNConfigSet)
+ ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs *ATNConfigSet)
+ ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs *ATNConfigSet)
+}
+
+type DefaultErrorListener struct {
+}
+
+//goland:noinspection GoUnusedExportedFunction
+func NewDefaultErrorListener() *DefaultErrorListener {
+ return new(DefaultErrorListener)
+}
+
+func (d *DefaultErrorListener) SyntaxError(_ Recognizer, _ interface{}, _, _ int, _ string, _ RecognitionException) {
+}
+
+func (d *DefaultErrorListener) ReportAmbiguity(_ Parser, _ *DFA, _, _ int, _ bool, _ *BitSet, _ *ATNConfigSet) {
+}
+
+func (d *DefaultErrorListener) ReportAttemptingFullContext(_ Parser, _ *DFA, _, _ int, _ *BitSet, _ *ATNConfigSet) {
+}
+
+func (d *DefaultErrorListener) ReportContextSensitivity(_ Parser, _ *DFA, _, _, _ int, _ *ATNConfigSet) {
+}
+
+type ConsoleErrorListener struct {
+ *DefaultErrorListener
+}
+
+func NewConsoleErrorListener() *ConsoleErrorListener {
+ return new(ConsoleErrorListener)
+}
+
+// ConsoleErrorListenerINSTANCE provides a default instance of ConsoleErrorListener.
+var ConsoleErrorListenerINSTANCE = NewConsoleErrorListener()
+
+// SyntaxError prints messages to os.Stderr containing the
+// values of line, charPositionInLine, and msg using
+// the following format:
+//
+//	line <line>:<charPositionInLine> <msg>
+func (c *ConsoleErrorListener) SyntaxError(_ Recognizer, _ interface{}, line, column int, msg string, _ RecognitionException) {
+ _, _ = fmt.Fprintln(os.Stderr, "line "+strconv.Itoa(line)+":"+strconv.Itoa(column)+" "+msg)
+}
+
+type ProxyErrorListener struct {
+ *DefaultErrorListener
+ delegates []ErrorListener
+}
+
+func NewProxyErrorListener(delegates []ErrorListener) *ProxyErrorListener {
+ if delegates == nil {
+ panic("delegates is not provided")
+ }
+ l := new(ProxyErrorListener)
+ l.delegates = delegates
+ return l
+}
+
+func (p *ProxyErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) {
+ for _, d := range p.delegates {
+ d.SyntaxError(recognizer, offendingSymbol, line, column, msg, e)
+ }
+}
+
+func (p *ProxyErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs *ATNConfigSet) {
+ for _, d := range p.delegates {
+ d.ReportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs)
+ }
+}
+
+func (p *ProxyErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs *ATNConfigSet) {
+ for _, d := range p.delegates {
+ d.ReportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs)
+ }
+}
+
+func (p *ProxyErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs *ATNConfigSet) {
+ for _, d := range p.delegates {
+ d.ReportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs)
+ }
+}
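+
+// A sketch of a custom listener: embedding DefaultErrorListener means only the
+// methods of interest need overriding (collectingListener is hypothetical):
+//
+//	type collectingListener struct {
+//		*DefaultErrorListener
+//		msgs []string
+//	}
+//
+//	func (c *collectingListener) SyntaxError(_ Recognizer, _ interface{}, line, column int, msg string, _ RecognitionException) {
+//		c.msgs = append(c.msgs, fmt.Sprintf("%d:%d %s", line, column, msg))
+//	}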
diff --git a/vendor/github.com/antlr4-go/antlr/v4/error_strategy.go b/vendor/github.com/antlr4-go/antlr/v4/error_strategy.go
new file mode 100644
index 000000000..9db2be1c7
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/error_strategy.go
@@ -0,0 +1,702 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+type ErrorStrategy interface {
+ reset(Parser)
+ RecoverInline(Parser) Token
+ Recover(Parser, RecognitionException)
+ Sync(Parser)
+ InErrorRecoveryMode(Parser) bool
+ ReportError(Parser, RecognitionException)
+ ReportMatch(Parser)
+}
+
+// DefaultErrorStrategy is the default implementation of ANTLRErrorStrategy used for
+// error reporting and recovery in ANTLR parsers.
+type DefaultErrorStrategy struct {
+ errorRecoveryMode bool
+ lastErrorIndex int
+ lastErrorStates *IntervalSet
+}
+
+var _ ErrorStrategy = &DefaultErrorStrategy{}
+
+func NewDefaultErrorStrategy() *DefaultErrorStrategy {
+
+ d := new(DefaultErrorStrategy)
+
+ // Indicates whether the error strategy is currently "recovering from an
+ // error". This is used to suppress Reporting multiple error messages while
+ // attempting to recover from a detected syntax error.
+ //
+ // See also InErrorRecoveryMode.
+ //
+ d.errorRecoveryMode = false
+
+ // The index into the input stream where the last error occurred.
+ // This is used to prevent infinite loops where an error is found
+ // but no token is consumed during recovery...another error is found,
+ // ad nauseam. This is a failsafe mechanism to guarantee that at least
+ // one token/tree node is consumed for two errors.
+ //
+ d.lastErrorIndex = -1
+ d.lastErrorStates = nil
+ return d
+}
+
+// reset resets the strategy state. The default implementation simply calls endErrorCondition to
+// ensure that the handler is not in error recovery mode.
+func (d *DefaultErrorStrategy) reset(recognizer Parser) {
+ d.endErrorCondition(recognizer)
+}
+
+// This method is called to enter error recovery mode when a recognition
+// exception is Reported.
+func (d *DefaultErrorStrategy) beginErrorCondition(_ Parser) {
+ d.errorRecoveryMode = true
+}
+
+func (d *DefaultErrorStrategy) InErrorRecoveryMode(_ Parser) bool {
+ return d.errorRecoveryMode
+}
+
+// This method is called to leave error recovery mode after recovering from
+// a recognition exception.
+func (d *DefaultErrorStrategy) endErrorCondition(_ Parser) {
+ d.errorRecoveryMode = false
+ d.lastErrorStates = nil
+ d.lastErrorIndex = -1
+}
+
+// ReportMatch is the default implementation of error matching and simply calls endErrorCondition.
+func (d *DefaultErrorStrategy) ReportMatch(recognizer Parser) {
+ d.endErrorCondition(recognizer)
+}
+
+// ReportError is the default implementation of error reporting.
+// It returns immediately if the handler is already
+// in error recovery mode. Otherwise, it calls [beginErrorCondition]
+// and dispatches the Reporting task based on the runtime type of e
+// according to the following table.
+//
+// [NoViableAltException] : Dispatches the call to [ReportNoViableAlternative]
+// [InputMisMatchException] : Dispatches the call to [ReportInputMisMatch]
+// [FailedPredicateException] : Dispatches the call to [ReportFailedPredicate]
+// All other types : Calls [NotifyErrorListeners] to Report the exception
+func (d *DefaultErrorStrategy) ReportError(recognizer Parser, e RecognitionException) {
+ // if we've already Reported an error and have not Matched a token
+ // yet successfully, don't Report any errors.
+ if d.InErrorRecoveryMode(recognizer) {
+ return // don't Report spurious errors
+ }
+ d.beginErrorCondition(recognizer)
+
+ switch t := e.(type) {
+ default:
+ fmt.Println("unknown recognition error type: " + reflect.TypeOf(e).Name())
+ // fmt.Println(e.stack)
+ recognizer.NotifyErrorListeners(e.GetMessage(), e.GetOffendingToken(), e)
+ case *NoViableAltException:
+ d.ReportNoViableAlternative(recognizer, t)
+ case *InputMisMatchException:
+ d.ReportInputMisMatch(recognizer, t)
+ case *FailedPredicateException:
+ d.ReportFailedPredicate(recognizer, t)
+ }
+}
+
+// Recover is the default recovery implementation.
+// It reSynchronizes the parser by consuming tokens until we find one in the reSynchronization set -
+// loosely the set of tokens that can follow the current rule.
+func (d *DefaultErrorStrategy) Recover(recognizer Parser, _ RecognitionException) {
+
+ if d.lastErrorIndex == recognizer.GetInputStream().Index() &&
+ d.lastErrorStates != nil && d.lastErrorStates.contains(recognizer.GetState()) {
+ // Uh oh, another error at the same token index and a previously-Visited
+ // state in the ATN; this must be a case where LT(1) is in the recovery
+ // token set so nothing got consumed. Consume a single token
+ // at least to prevent an infinite loop; this is a failsafe.
+ recognizer.Consume()
+ }
+ d.lastErrorIndex = recognizer.GetInputStream().Index()
+ if d.lastErrorStates == nil {
+ d.lastErrorStates = NewIntervalSet()
+ }
+ d.lastErrorStates.addOne(recognizer.GetState())
+ followSet := d.GetErrorRecoverySet(recognizer)
+ d.consumeUntil(recognizer, followSet)
+}
+
+// Sync is the default implementation of error strategy synchronization.
+//
+// This Sync makes sure that the current lookahead symbol is consistent with what were expecting
+// at this point in the [ATN]. You can call this anytime but ANTLR only
+// generates code to check before sub-rules/loops and each iteration.
+//
+// Implements [Jim Idle]'s magic Sync mechanism in closures and optional
+// sub-rules. E.g.:
+//
+// a : Sync ( stuff Sync )*
+// Sync : {consume to what can follow Sync}
+//
+// At the start of a sub-rule upon error, Sync performs single
+// token deletion, if possible. If it can't do that, it bails on the current
+// rule and uses the default error recovery, which consumes until the
+// reSynchronization set of the current rule.
+//
+// If the sub-rule is optional - (...)?, (...)*, or a block with an empty
+// alternative - then the expected set includes what follows the sub-rule.
+//
+// During loop iteration, it consumes until it sees a token that can start a
+// sub-rule or what follows loop. Yes, that is pretty aggressive. We opt to
+// stay in the loop as long as possible.
+//
+// # Origins
+//
+// Previous versions of ANTLR did a poor job of their recovery within loops.
+// A single mismatched token or missing token would force the parser to bail
+// out of the entire rule surrounding the loop. So, for rule:
+//
+// classfunc : 'class' ID '{' member* '}'
+//
+// input with an extra token between members would force the parser to
+// consume until it found the next class definition rather than the next
+// member definition of the current class.
+//
+// This functionality cost a bit of effort because the parser has to
+// compare the token set at the start of the loop and at each iteration. If for
+// some reason speed is suffering for you, you can turn off this
+// functionality by simply overriding this method as empty:
+//
+// { }
+//
+// [Jim Idle]: https://github.com/jimidle
+func (d *DefaultErrorStrategy) Sync(recognizer Parser) {
+ // If already recovering, don't try to Sync
+ if d.InErrorRecoveryMode(recognizer) {
+ return
+ }
+
+ s := recognizer.GetInterpreter().atn.states[recognizer.GetState()]
+ la := recognizer.GetTokenStream().LA(1)
+
+ // Try the cheaper subset first; we might get lucky. Seems to shave a wee bit off.
+ nextTokens := recognizer.GetATN().NextTokens(s, nil)
+ if nextTokens.contains(TokenEpsilon) || nextTokens.contains(la) {
+ return
+ }
+
+ switch s.GetStateType() {
+ case ATNStateBlockStart, ATNStateStarBlockStart, ATNStatePlusBlockStart, ATNStateStarLoopEntry:
+ // Report error and recover if possible
+ if d.SingleTokenDeletion(recognizer) != nil {
+ return
+ }
+ recognizer.SetError(NewInputMisMatchException(recognizer))
+ case ATNStatePlusLoopBack, ATNStateStarLoopBack:
+ d.ReportUnwantedToken(recognizer)
+ expecting := NewIntervalSet()
+ expecting.addSet(recognizer.GetExpectedTokens())
+ whatFollowsLoopIterationOrRule := expecting.addSet(d.GetErrorRecoverySet(recognizer))
+ d.consumeUntil(recognizer, whatFollowsLoopIterationOrRule)
+ default:
+ // do nothing if we can't identify the exact kind of ATN state
+ }
+}
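+
+// As the doc comment above notes, Sync's per-iteration checks can be disabled
+// by overriding the method as empty; a hedged sketch via embedding
+// (noSyncStrategy is hypothetical; SetErrorHandler is the parser's strategy setter):
+//
+//	type noSyncStrategy struct {
+//		*DefaultErrorStrategy
+//	}
+//
+//	func (s *noSyncStrategy) Sync(_ Parser) {}
+//
+//	parser.SetErrorHandler(&noSyncStrategy{NewDefaultErrorStrategy()})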
+
+// ReportNoViableAlternative is called by [ReportError] when the exception is a [NoViableAltException].
+//
+// See also [ReportError]
+func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *NoViableAltException) {
+ tokens := recognizer.GetTokenStream()
+ var input string
+ if tokens != nil {
+ if e.startToken.GetTokenType() == TokenEOF {
+ input = ""
+ } else {
+ input = tokens.GetTextFromTokens(e.startToken, e.offendingToken)
+ }
+ } else {
+ input = ""
+ }
+ msg := "no viable alternative at input " + d.escapeWSAndQuote(input)
+ recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
+}
+
+// ReportInputMisMatch is called by [ReportError] when the exception is an [InputMisMatchException]
+//
+// See also: [ReportError]
+func (d *DefaultErrorStrategy) ReportInputMisMatch(recognizer Parser, e *InputMisMatchException) {
+ msg := "mismatched input " + d.GetTokenErrorDisplay(e.offendingToken) +
+ " expecting " + e.getExpectedTokens().StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false)
+ recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
+}
+
+// ReportFailedPredicate is called by [ReportError] when the exception is a [FailedPredicateException].
+//
+// See also: [ReportError]
+func (d *DefaultErrorStrategy) ReportFailedPredicate(recognizer Parser, e *FailedPredicateException) {
+ ruleName := recognizer.GetRuleNames()[recognizer.GetParserRuleContext().GetRuleIndex()]
+ msg := "rule " + ruleName + " " + e.message
+ recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
+}
+
+// ReportUnwantedToken is called to report a syntax error that requires the removal
+// of a token from the input stream. At the time this method is called, the
+// erroneous symbol is the current LT(1) symbol and has not yet been
+// removed from the input stream. When this method returns,
+// recognizer is in error recovery mode.
+//
+// This method is called when singleTokenDeletion identifies
+// single-token deletion as a viable recovery strategy for a mismatched
+// input error.
+//
+// The default implementation simply returns if the handler is already in
+// error recovery mode. Otherwise, it calls beginErrorCondition to
+// enter error recovery mode, followed by calling
+// [NotifyErrorListeners]
+func (d *DefaultErrorStrategy) ReportUnwantedToken(recognizer Parser) {
+ if d.InErrorRecoveryMode(recognizer) {
+ return
+ }
+ d.beginErrorCondition(recognizer)
+ t := recognizer.GetCurrentToken()
+ tokenName := d.GetTokenErrorDisplay(t)
+ expecting := d.GetExpectedTokens(recognizer)
+ msg := "extraneous input " + tokenName + " expecting " +
+ expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false)
+ recognizer.NotifyErrorListeners(msg, t, nil)
+}
+
+// ReportMissingToken is called to report a syntax error which requires the
+// insertion of a missing token into the input stream. At the time this
+// method is called, the missing token has not yet been inserted. When this
+// method returns, recognizer is in error recovery mode.
+//
+// This method is called when singleTokenInsertion identifies
+// single-token insertion as a viable recovery strategy for a mismatched
+// input error.
+//
+// The default implementation simply returns if the handler is already in
+// error recovery mode. Otherwise, it calls beginErrorCondition to
+// enter error recovery mode, followed by calling [NotifyErrorListeners]
+func (d *DefaultErrorStrategy) ReportMissingToken(recognizer Parser) {
+ if d.InErrorRecoveryMode(recognizer) {
+ return
+ }
+ d.beginErrorCondition(recognizer)
+ t := recognizer.GetCurrentToken()
+ expecting := d.GetExpectedTokens(recognizer)
+ msg := "missing " + expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false) +
+ " at " + d.GetTokenErrorDisplay(t)
+ recognizer.NotifyErrorListeners(msg, t, nil)
+}
+
+// The RecoverInline default implementation attempts to recover from the mismatched input
+// by using single token insertion and deletion as described below. If the
+// recovery attempt fails, this method panics with [InputMisMatchException].
+// TODO: Not sure that panic() is the right thing to do here - JI
+//
+// # EXTRA TOKEN (single token deletion)
+//
+// LA(1) is not what we are looking for. If LA(2) has the
+// right token, however, then assume LA(1) is some extra spurious
+// token and delete it. Then consume and return the next token (which was
+// the LA(2) token) as the successful result of the Match operation.
+//
+// This recovery strategy is implemented by [SingleTokenDeletion].
+//
+// # MISSING TOKEN (single token insertion)
+//
+// If the current token - at LA(1) - is consistent with what could come
+// after the expected LA(1) token, then assume the token is missing
+// and use the parser's [TokenFactory] to create it on the fly. The
+// “insertion” is performed by returning the created token as the successful
+// result of the Match operation.
+//
+// This recovery strategy is implemented by [SingleTokenInsertion].
+//
+// # Example
+//
+// For example, Input i=(3 is clearly missing the ')'. When
+// the parser returns from the nested call to expr, it will have
+// called the chain:
+//
+// stat → expr → atom
+//
+// and it will be trying to Match the ')' at this point in the
+// derivation:
+//
+// : ID '=' '(' INT ')' ('+' atom)* ';'
+// ^
+//
+// The attempt to [Match] ')' will fail when it sees ';' and
+// call [RecoverInline]. To recover, it sees that LA(1)==';'
+// is in the set of tokens that can follow the ')' token reference
+// in rule atom. It can assume that you forgot the ')'.
+func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token {
+ // SINGLE TOKEN DELETION
+ MatchedSymbol := d.SingleTokenDeletion(recognizer)
+ if MatchedSymbol != nil {
+ // we have deleted the extra token.
+ // now, move past ttype token as if all were ok
+ recognizer.Consume()
+ return MatchedSymbol
+ }
+ // SINGLE TOKEN INSERTION
+ if d.SingleTokenInsertion(recognizer) {
+ return d.GetMissingSymbol(recognizer)
+ }
+ // even that didn't work; record the mismatched input so the parser can handle it
+ recognizer.SetError(NewInputMisMatchException(recognizer))
+ return nil
+}
+
+// SingleTokenInsertion implements the single-token insertion inline error recovery
+// strategy. It is called by [RecoverInline] if the single-token
+// deletion strategy fails to recover from the mismatched input. If this
+// method returns true, recognizer will be in error recovery mode.
+//
+// This method determines whether single-token insertion is viable by
+// checking if the LA(1) input symbol could be successfully Matched
+// if it were instead the LA(2) symbol. If this method returns true,
+// the caller is responsible for creating and inserting a
+// token with the correct type to produce this behavior.
+//
+// This func returns true if single-token insertion is a viable recovery
+// strategy for the current mismatched input.
+func (d *DefaultErrorStrategy) SingleTokenInsertion(recognizer Parser) bool {
+ currentSymbolType := recognizer.GetTokenStream().LA(1)
+ // if the current token is consistent with what could come after the current
+ // ATN state, then we know we're missing a token; error recovery
+ // is free to conjure up and insert the missing token
+ atn := recognizer.GetInterpreter().atn
+ currentState := atn.states[recognizer.GetState()]
+ next := currentState.GetTransitions()[0].getTarget()
+ expectingAtLL2 := atn.NextTokens(next, recognizer.GetParserRuleContext())
+ if expectingAtLL2.contains(currentSymbolType) {
+ d.ReportMissingToken(recognizer)
+ return true
+ }
+
+ return false
+}
+
+// SingleTokenDeletion implements the single-token deletion inline error recovery
+// strategy. It is called by [RecoverInline] to attempt to recover
+// from mismatched input. If this method returns nil, the parser and error
+// handler state will not have changed. If this method returns non-nil,
+// recognizer will not be in error recovery mode since the
+// returned token was a successful Match.
+//
+// If the single-token deletion is successful, this method calls
+// [ReportUnwantedToken] to Report the error, followed by
+// [Consume] to actually “delete” the extraneous token. Then,
+// before returning, [ReportMatch] is called to signal a successful
+// Match.
+//
+// The func returns the successfully Matched [Token] instance if single-token
+// deletion successfully recovers from the mismatched input, otherwise nil.
+func (d *DefaultErrorStrategy) SingleTokenDeletion(recognizer Parser) Token {
+ NextTokenType := recognizer.GetTokenStream().LA(2)
+ expecting := d.GetExpectedTokens(recognizer)
+ if expecting.contains(NextTokenType) {
+ d.ReportUnwantedToken(recognizer)
+ // print("recoverFromMisMatchedToken deleting " \
+ // + str(recognizer.GetTokenStream().LT(1)) \
+ // + " since " + str(recognizer.GetTokenStream().LT(2)) \
+ // + " is what we want", file=sys.stderr)
+ recognizer.Consume() // simply delete extra token
+ // we want to return the token we're actually Matching
+ MatchedSymbol := recognizer.GetCurrentToken()
+ d.ReportMatch(recognizer) // we know current token is correct
+ return MatchedSymbol
+ }
+
+ return nil
+}
+
+// GetMissingSymbol conjures up a missing token during error recovery.
+//
+// The recognizer attempts to recover from single missing
+// symbols. But, actions might refer to that missing symbol.
+// For example:
+//
+// x=ID {f($x)}.
+//
+// The action clearly assumes
+// that there has been an identifier Matched previously and that
+// $x points at that token. If that token is missing, but
+// the next token in the stream is what we want we assume that
+// this token is missing, and we keep going. Because we
+// have to return some token to replace the missing token,
+// we have to conjure one up. This method gives the user control
+// over the tokens returned for missing tokens. Mostly,
+// you will want to create something special for identifier
+// tokens. For literals such as '{' and ',', the default
+// action in the parser or tree parser works. It simply creates
+// a [CommonToken] of the appropriate type. The text will be the token name.
+// If you need to change which tokens must be created by the lexer,
+// override this method to create the appropriate tokens.
+func (d *DefaultErrorStrategy) GetMissingSymbol(recognizer Parser) Token {
+ currentSymbol := recognizer.GetCurrentToken()
+ expecting := d.GetExpectedTokens(recognizer)
+ expectedTokenType := expecting.first()
+ var tokenText string
+
+ if expectedTokenType == TokenEOF {
+ tokenText = "<missing EOF>"
+ } else {
+ ln := recognizer.GetLiteralNames()
+ if expectedTokenType > 0 && expectedTokenType < len(ln) {
+ tokenText = "<missing " + ln[expectedTokenType] + ">"
+ } else {
+ tokenText = "<missing undefined>" // TODO: matches the JS impl
+ }
+ }
+ current := currentSymbol
+ lookback := recognizer.GetTokenStream().LT(-1)
+ if current.GetTokenType() == TokenEOF && lookback != nil {
+ current = lookback
+ }
+
+ tf := recognizer.GetTokenFactory()
+
+ return tf.Create(current.GetSource(), expectedTokenType, tokenText, TokenDefaultChannel, -1, -1, current.GetLine(), current.GetColumn())
+}
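
The comment above invites overriding GetMissingSymbol to customize conjured tokens. A sketch of such an override; note that because Go embedding is not virtual dispatch, the embedded RecoverInline would still call the base implementation, so a real strategy must also route RecoverInline through its own GetMissingSymbol. `myIdentifierType` is an assumed token-type constant from a generated lexer:

```go
type idStrategy struct {
	*antlr.DefaultErrorStrategy
}

// GetMissingSymbol gives conjured identifier tokens a recognizable
// placeholder text instead of the default "<missing ...>" form.
func (s *idStrategy) GetMissingSymbol(recognizer antlr.Parser) antlr.Token {
	tok := s.DefaultErrorStrategy.GetMissingSymbol(recognizer)
	if tok.GetTokenType() == myIdentifierType { // assumed constant
		tok.SetText("_missing_identifier_")
	}
	return tok
}
```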
+
+func (d *DefaultErrorStrategy) GetExpectedTokens(recognizer Parser) *IntervalSet {
+ return recognizer.GetExpectedTokens()
+}
+
+// GetTokenErrorDisplay determines how a token should be displayed in an error message.
+// The default is to display just the text, but during development you might
+// want to have a lot of information spit out. Override this func in that case
+// to use t.String() (which, for [CommonToken], dumps everything about
+// the token). This is better than forcing you to override a method in
+// your token objects because you don't have to go modify your lexer
+// so that it creates a new type.
+func (d *DefaultErrorStrategy) GetTokenErrorDisplay(t Token) string {
+ if t == nil {
+ return ""
+ }
+ s := t.GetText()
+ if s == "" {
+ if t.GetTokenType() == TokenEOF {
+ s = ""
+ } else {
+ s = "<" + strconv.Itoa(t.GetTokenType()) + ">"
+ }
+ }
+ return d.escapeWSAndQuote(s)
+}
+
+func (d *DefaultErrorStrategy) escapeWSAndQuote(s string) string {
+ s = strings.Replace(s, "\t", "\\t", -1)
+ s = strings.Replace(s, "\n", "\\n", -1)
+ s = strings.Replace(s, "\r", "\\r", -1)
+ return "'" + s + "'"
+}
+
+// GetErrorRecoverySet computes the error recovery set for the current rule. During
+// rule invocation, the parser pushes the set of tokens that can
+// follow that rule reference on the stack. This amounts to
+// computing FIRST of what follows the rule reference in the
+// enclosing rule. See LinearApproximator.FIRST().
+//
+// This local follow set only includes tokens
+// from within the rule i.e., the FIRST computation done by
+// ANTLR stops at the end of a rule.
+//
+// # Example
+//
+// When you find a "no viable alt exception", the input is not
+// consistent with any of the alternatives for rule r. The best
+// thing to do is to consume tokens until you see something that
+// can legally follow a call to r or any rule that called r.
+// You don't want the exact set of viable next tokens because the
+// input might just be missing a token--you might consume the
+// rest of the input looking for one of the missing tokens.
+//
+// Consider the grammar:
+//
+// a : '[' b ']'
+// | '(' b ')'
+// ;
+//
+// b : c '^' INT
+// ;
+//
+// c : ID
+// | INT
+// ;
+//
+// At each rule invocation, the set of tokens that could follow
+// that rule is pushed on a stack. Here are the various
+// context-sensitive follow sets:
+//
+// FOLLOW(b1_in_a) = FIRST(']') = ']'
+// FOLLOW(b2_in_a) = FIRST(')') = ')'
+// FOLLOW(c_in_b) = FIRST('^') = '^'
+//
+// Upon erroneous input “[]”, the call chain is
+//
+// a → b → c
+//
+// and, hence, the follow context stack is:
+//
+// Depth Follow set Start of rule execution
+// 0 a (from main())
+// 1 ']' b
+// 2 '^' c
+//
+// Notice that ')' is not included, because b would have to have
+// been called from a different context in rule a for ')' to be
+// included.
+//
+// For error recovery, we cannot consider FOLLOW(c)
+// (context-sensitive or otherwise). We need the combined set of
+// all context-sensitive FOLLOW sets - the set of all tokens that
+// could follow any reference in the call chain. We need to
+// reSync to one of those tokens. Note that FOLLOW(c)='^' and if
+// we reSync'd to that token, we'd consume until EOF. We need to
+// Sync to context-sensitive FOLLOWs for a, b, and c:
+//
+// {']','^'}
+//
+// In this case, for input "[]", LA(1) is ']' and in the set, so we would
+// not consume anything. After printing an error, rule c would
+// return normally. Rule b would not find the required '^' though.
+// At this point, it gets a mismatched token error and panics an
+// exception (since LA(1) is not in the viable following token
+// set). The rule exception handler tries to recover, but finds
+// the same recovery set and doesn't consume anything. Rule b
+// exits normally returning to rule a. Now it finds the ']' (and
+// with the successful Match exits errorRecovery mode).
+//
+// So, you can see that the parser walks up the call chain looking
+// for the token that was a member of the recovery set.
+//
+// Errors are not generated in errorRecovery mode.
+//
+// ANTLR's error recovery mechanism is based upon original ideas:
+//
+// [Algorithms + Data Structures = Programs] by Niklaus Wirth and
+// [A note on error recovery in recursive descent parsers].
+//
+// Later, Josef Grosch had some good ideas in [Efficient and Comfortable Error Recovery in Recursive Descent
+// Parsers]
+//
+// Like Grosch, I implement context-sensitive FOLLOW sets that are combined at run-time upon error to avoid overhead
+// during parsing. Later, the runtime Sync was improved for loops/sub-rules; see the [Sync] docs.
+//
+// [A note on error recovery in recursive descent parsers]: http://portal.acm.org/citation.cfm?id=947902.947905
+// [Algorithms + Data Structures = Programs]: https://t.ly/5QzgE
+// [Efficient and Comfortable Error Recovery in Recursive Descent Parsers]: ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip
+func (d *DefaultErrorStrategy) GetErrorRecoverySet(recognizer Parser) *IntervalSet {
+ atn := recognizer.GetInterpreter().atn
+ ctx := recognizer.GetParserRuleContext()
+ recoverSet := NewIntervalSet()
+ for ctx != nil && ctx.GetInvokingState() >= 0 {
+ // compute what follows who invoked us
+ invokingState := atn.states[ctx.GetInvokingState()]
+ rt := invokingState.GetTransitions()[0]
+ follow := atn.NextTokens(rt.(*RuleTransition).followState, nil)
+ recoverSet.addSet(follow)
+ ctx = ctx.GetParent().(ParserRuleContext)
+ }
+ recoverSet.removeOne(TokenEpsilon)
+ return recoverSet
+}
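
To ground the worked example above: since addOne and contains are unexported, this sketch only compiles inside the antlr package (for instance in a test), and simply re-derives the combined recovery set {']', '^'} for input "[]":

```go
// Combined context-sensitive FOLLOW sets along the call chain a -> b -> c:
// FOLLOW(b_in_a) pushes ']', FOLLOW(c_in_b) pushes '^'.
follow := NewIntervalSet()
follow.addOne(']')
follow.addOne('^')

// LA(1) is ']' and is already in the set, so consumeUntil consumes nothing;
// rule c reports its error and returns normally, as described above.
fmt.Println(follow.contains(']')) // true
fmt.Println(follow.contains(')')) // false: b was not invoked from the '(' alternative
```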
+
+// consumeUntil consumes tokens until one Matches the given token set.
+func (d *DefaultErrorStrategy) consumeUntil(recognizer Parser, set *IntervalSet) {
+ ttype := recognizer.GetTokenStream().LA(1)
+ for ttype != TokenEOF && !set.contains(ttype) {
+ recognizer.Consume()
+ ttype = recognizer.GetTokenStream().LA(1)
+ }
+}
+
+// The BailErrorStrategy implementation of ANTLRErrorStrategy responds to syntax errors
+// by immediately canceling the parse operation with a
+// [ParseCancellationException]. The implementation ensures that the
+// exception field of each [ParserRuleContext] is set for all parse tree nodes
+// that were not completed prior to encountering the error.
+//
+// This error strategy is useful in the following scenarios.
+//
+// - Two-stage parsing: This error strategy allows the first
+// stage of two-stage parsing to immediately terminate if an error is
+// encountered, and immediately fall back to the second stage. In addition to
+// avoiding wasted work by attempting to recover from errors here, the empty
+// implementation of [BailErrorStrategy.Sync] improves the performance of
+// the first stage.
+//
+// - Silent validation: When syntax errors are not being
+// Reported or logged, and the parse result is simply ignored if errors occur,
+// the [BailErrorStrategy] avoids wasting work on recovering from errors
+// when the result will be ignored either way.
+//
+// myparser.SetErrorHandler(NewBailErrorStrategy())
+//
+// See also: [Parser.SetErrorHandler(ANTLRErrorStrategy)]
+type BailErrorStrategy struct {
+ *DefaultErrorStrategy
+}
+
+var _ ErrorStrategy = &BailErrorStrategy{}
+
+//goland:noinspection GoUnusedExportedFunction
+func NewBailErrorStrategy() *BailErrorStrategy {
+
+ b := new(BailErrorStrategy)
+
+ b.DefaultErrorStrategy = NewDefaultErrorStrategy()
+
+ return b
+}
+
+// Recover, instead of recovering from exception e, re-panics it wrapped
+// in a [ParseCancellationException] so it is not caught by the
+// rule function's recovery code. Use Exception.GetCause() to get the
+// original [RecognitionException].
+func (b *BailErrorStrategy) Recover(recognizer Parser, e RecognitionException) {
+ context := recognizer.GetParserRuleContext()
+ for context != nil {
+ context.SetException(e)
+ if parent, ok := context.GetParent().(ParserRuleContext); ok {
+ context = parent
+ } else {
+ context = nil
+ }
+ }
+ recognizer.SetError(NewParseCancellationException()) // TODO: we don't emit e properly
+}
+
+// RecoverInline makes sure we don't attempt to recover inline; if the parser
+// successfully recovers, it won't panic an exception.
+func (b *BailErrorStrategy) RecoverInline(recognizer Parser) Token {
+ b.Recover(recognizer, NewInputMisMatchException(recognizer))
+
+ return nil
+}
+
+// Sync makes sure we don't attempt to recover from problems in sub-rules.
+func (b *BailErrorStrategy) Sync(_ Parser) {
+}
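
The two-stage pattern described above typically looks like this from the consumer side. Everything generated is hypothetical in this sketch: `myparser.NewMyParser` and its `Document` entry rule stand in for a real generated parser, and `HasError` is assumed to be the flag generated parsers consult after SetError:

```go
func parseTwoStage(stream antlr.TokenStream) antlr.ParseTree {
	// Stage 1: fast SLL prediction, bailing on the first syntax error.
	p := myparser.NewMyParser(stream) // hypothetical generated parser
	p.SetErrorHandler(antlr.NewBailErrorStrategy())
	p.GetInterpreter().SetPredictionMode(antlr.PredictionModeSLL)

	tree := p.Document() // hypothetical entry rule
	if !p.HasError() {
		return tree
	}

	// Stage 2: the input was not SLL-parseable; rewind and reparse with
	// full LL prediction and the default, recovering error strategy.
	stream.Seek(0)
	p = myparser.NewMyParser(stream)
	p.GetInterpreter().SetPredictionMode(antlr.PredictionModeLL)
	return p.Document()
}
```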
diff --git a/vendor/github.com/antlr4-go/antlr/v4/errors.go b/vendor/github.com/antlr4-go/antlr/v4/errors.go
new file mode 100644
index 000000000..8f0f2f601
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/errors.go
@@ -0,0 +1,259 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+// The root of the ANTLR exception hierarchy. In general, ANTLR tracks just
+// 3 kinds of errors: prediction errors, failed predicate errors, and
+// mismatched input errors. In each case, the parser knows where it is
+// in the input, where it is in the ATN, the rule invocation stack,
+// and what kind of problem occurred.
+
+type RecognitionException interface {
+ GetOffendingToken() Token
+ GetMessage() string
+ GetInputStream() IntStream
+}
+
+type BaseRecognitionException struct {
+ message string
+ recognizer Recognizer
+ offendingToken Token
+ offendingState int
+ ctx RuleContext
+ input IntStream
+}
+
+func NewBaseRecognitionException(message string, recognizer Recognizer, input IntStream, ctx RuleContext) *BaseRecognitionException {
+
+ // todo
+ // Error.call(this)
+ //
+ // if (!!Error.captureStackTrace) {
+ // Error.captureStackTrace(this, RecognitionException)
+ // } else {
+ // stack := NewError().stack
+ // }
+ // TODO: may be able to use - "runtime" func Stack(buf []byte, all bool) int
+
+ t := new(BaseRecognitionException)
+
+ t.message = message
+ t.recognizer = recognizer
+ t.input = input
+ t.ctx = ctx
+
+ // The current Token when an error occurred. Since not all streams
+ // support accessing symbols by index, we have to track the [Token]
+ // instance itself.
+ //
+ t.offendingToken = nil
+
+ // Get the ATN state number the parser was in at the time the error
+ // occurred. For NoViableAltException and LexerNoViableAltException exceptions, this is the
+ // DecisionState number. For others, it is the state whose outgoing edge we couldn't Match.
+ //
+ t.offendingState = -1
+ if t.recognizer != nil {
+ t.offendingState = t.recognizer.GetState()
+ }
+
+ return t
+}
+
+func (b *BaseRecognitionException) GetMessage() string {
+ return b.message
+}
+
+func (b *BaseRecognitionException) GetOffendingToken() Token {
+ return b.offendingToken
+}
+
+func (b *BaseRecognitionException) GetInputStream() IntStream {
+ return b.input
+}
+
+// If the state number is not known, this method returns -1.
+
+// getExpectedTokens gets the set of input symbols which could potentially follow the
+// previously Matched symbol at the time this exception was raised.
+//
+// If the set of expected tokens is not known and could not be computed,
+// this method returns nil.
+//
+// The func returns the set of token types that could potentially follow the current
+// state in the [ATN], or nil if the information is not available.
+func (b *BaseRecognitionException) getExpectedTokens() *IntervalSet {
+ if b.recognizer != nil {
+ return b.recognizer.GetATN().getExpectedTokens(b.offendingState, b.ctx)
+ }
+
+ return nil
+}
+
+func (b *BaseRecognitionException) String() string {
+ return b.message
+}
+
+type LexerNoViableAltException struct {
+ *BaseRecognitionException
+
+ startIndex int
+ deadEndConfigs *ATNConfigSet
+}
+
+func NewLexerNoViableAltException(lexer Lexer, input CharStream, startIndex int, deadEndConfigs *ATNConfigSet) *LexerNoViableAltException {
+
+ l := new(LexerNoViableAltException)
+
+ l.BaseRecognitionException = NewBaseRecognitionException("", lexer, input, nil)
+
+ l.startIndex = startIndex
+ l.deadEndConfigs = deadEndConfigs
+
+ return l
+}
+
+func (l *LexerNoViableAltException) String() string {
+ symbol := ""
+ if l.startIndex >= 0 && l.startIndex < l.input.Size() {
+ symbol = l.input.(CharStream).GetTextFromInterval(NewInterval(l.startIndex, l.startIndex))
+ }
+ return "LexerNoViableAltException" + symbol
+}
+
+type NoViableAltException struct {
+ *BaseRecognitionException
+
+ startToken Token
+ offendingToken Token
+ ctx ParserRuleContext
+ deadEndConfigs *ATNConfigSet
+}
+
+// NewNoViableAltException creates an exception indicating that the parser could not decide which of two or more paths
+// to take based upon the remaining input. It tracks the starting token
+// of the offending input and also knows where the parser was
+// in the various paths when the error occurred.
+//
+// Reported by [ReportNoViableAlternative]
+func NewNoViableAltException(recognizer Parser, input TokenStream, startToken Token, offendingToken Token, deadEndConfigs *ATNConfigSet, ctx ParserRuleContext) *NoViableAltException {
+
+ if ctx == nil {
+ ctx = recognizer.GetParserRuleContext()
+ }
+
+ if offendingToken == nil {
+ offendingToken = recognizer.GetCurrentToken()
+ }
+
+ if startToken == nil {
+ startToken = recognizer.GetCurrentToken()
+ }
+
+ if input == nil {
+ input = recognizer.GetInputStream().(TokenStream)
+ }
+
+ n := new(NoViableAltException)
+ n.BaseRecognitionException = NewBaseRecognitionException("", recognizer, input, ctx)
+
+ // Which configurations did we try at input.Index() that couldn't Match
+ // input.LT(1)
+ n.deadEndConfigs = deadEndConfigs
+
+ // The token object at the start index: the input stream might
+ // not be buffering tokens, so get a reference to it.
+ //
+ // At the time the error occurred, of course the stream needs to keep a
+ // buffer of all the tokens, but later we might not have access to those.
+ n.startToken = startToken
+ n.offendingToken = offendingToken
+
+ return n
+}
+
+type InputMisMatchException struct {
+ *BaseRecognitionException
+}
+
+// NewInputMisMatchException creates an exception that signifies any kind of mismatched input exceptions such as
+// when the current input does not Match the expected token.
+func NewInputMisMatchException(recognizer Parser) *InputMisMatchException {
+
+ i := new(InputMisMatchException)
+ i.BaseRecognitionException = NewBaseRecognitionException("", recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext())
+
+ i.offendingToken = recognizer.GetCurrentToken()
+
+ return i
+
+}
+
+// FailedPredicateException indicates that a semantic predicate failed during validation. Validation of predicates
+// occurs when normally parsing the alternative just like Matching a token.
+// Disambiguating predicate evaluation occurs when we test a predicate during
+// prediction.
+type FailedPredicateException struct {
+ *BaseRecognitionException
+
+ ruleIndex int
+ predicateIndex int
+ predicate string
+}
+
+//goland:noinspection GoUnusedExportedFunction
+func NewFailedPredicateException(recognizer Parser, predicate string, message string) *FailedPredicateException {
+
+ f := new(FailedPredicateException)
+
+ f.BaseRecognitionException = NewBaseRecognitionException(f.formatMessage(predicate, message), recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext())
+
+ s := recognizer.GetInterpreter().atn.states[recognizer.GetState()]
+ trans := s.GetTransitions()[0]
+ if trans2, ok := trans.(*PredicateTransition); ok {
+ f.ruleIndex = trans2.ruleIndex
+ f.predicateIndex = trans2.predIndex
+ } else {
+ f.ruleIndex = 0
+ f.predicateIndex = 0
+ }
+ f.predicate = predicate
+ f.offendingToken = recognizer.GetCurrentToken()
+
+ return f
+}
+
+func (f *FailedPredicateException) formatMessage(predicate, message string) string {
+ if message != "" {
+ return message
+ }
+
+ return "failed predicate: {" + predicate + "}?"
+}
+
+type ParseCancellationException struct {
+}
+
+func (p ParseCancellationException) GetOffendingToken() Token {
+ //TODO implement me
+ panic("implement me")
+}
+
+func (p ParseCancellationException) GetMessage() string {
+ //TODO implement me
+ panic("implement me")
+}
+
+func (p ParseCancellationException) GetInputStream() IntStream {
+ //TODO implement me
+ panic("implement me")
+}
+
+func NewParseCancellationException() *ParseCancellationException {
+ // Error.call(this)
+ // Error.captureStackTrace(this, ParseCancellationException)
+ return new(ParseCancellationException)
+}
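
The three concrete exception kinds defined above are normally told apart with a type switch, mirroring the dispatch DefaultErrorStrategy performs when it routes ReportError to the Report* helpers. A small sketch:

```go
// describe classifies a recognition error the same way ReportError does.
func describe(e antlr.RecognitionException) string {
	switch ex := e.(type) {
	case *antlr.NoViableAltException:
		return "prediction error: no viable alternative"
	case *antlr.InputMisMatchException:
		return "mismatched input"
	case *antlr.FailedPredicateException:
		return "failed predicate: " + ex.GetMessage()
	default:
		return "recognition error: " + e.GetMessage()
	}
}
```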
diff --git a/vendor/github.com/antlr4-go/antlr/v4/file_stream.go b/vendor/github.com/antlr4-go/antlr/v4/file_stream.go
new file mode 100644
index 000000000..5f65f809b
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/file_stream.go
@@ -0,0 +1,67 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "bufio"
+ "os"
+)
+
+// This is an InputStream that is loaded from a file all at once
+// when you construct the object.
+
+type FileStream struct {
+ InputStream
+ filename string
+}
+
+//goland:noinspection GoUnusedExportedFunction
+func NewFileStream(fileName string) (*FileStream, error) {
+
+ f, err := os.Open(fileName)
+ if err != nil {
+ return nil, err
+ }
+
+ defer func(f *os.File) {
+ // The file is fully read before the deferred close runs, so a close
+ // error can safely be ignored here.
+ _ = f.Close()
+ }(f)
+
+ reader := bufio.NewReader(f)
+ fInfo, err := f.Stat()
+ if err != nil {
+ return nil, err
+ }
+
+ fs := &FileStream{
+ InputStream: InputStream{
+ index: 0,
+ name: fileName,
+ },
+ filename: fileName,
+ }
+
+ // Pre-build the buffer and read runes efficiently
+ //
+ fs.data = make([]rune, 0, fInfo.Size())
+ for {
+ r, _, err := reader.ReadRune()
+ if err != nil {
+ break
+ }
+ fs.data = append(fs.data, r)
+ }
+ fs.size = len(fs.data) // Size in runes
+
+ // All done.
+ //
+ return fs, nil
+}
+
+func (f *FileStream) GetSourceName() string {
+ return f.filename
+}
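
Typical use of the constructor above; the input path and the generated lexer constructor are illustrative:

```go
fs, err := antlr.NewFileStream("input.txt")
if err != nil {
	log.Fatalf("cannot read input: %v", err)
}
lex := mylexer.NewMyLexer(fs) // hypothetical generated lexer
tokens := antlr.NewCommonTokenStream(lex, antlr.TokenDefaultChannel)
_ = tokens // hand to a generated parser from here
```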
diff --git a/vendor/github.com/antlr4-go/antlr/v4/input_stream.go b/vendor/github.com/antlr4-go/antlr/v4/input_stream.go
new file mode 100644
index 000000000..b737fe85f
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/input_stream.go
@@ -0,0 +1,157 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "bufio"
+ "io"
+)
+
+type InputStream struct {
+ name string
+ index int
+ data []rune
+ size int
+}
+
+// NewIoStream creates a new input stream from the given io.Reader reader.
+// Note that the reader is read completely into memory and so it must actually
+// have a stopping point - you cannot pass in a reader on an open-ended source such
+// as a socket for instance.
+func NewIoStream(reader io.Reader) *InputStream {
+
+ rReader := bufio.NewReader(reader)
+
+ is := &InputStream{
+ name: "",
+ index: 0,
+ }
+
+ // Pre-build the buffer and read runes reasonably efficiently given that
+ // we don't exactly know how big the input is.
+ //
+ is.data = make([]rune, 0, 512)
+ for {
+ r, _, err := rReader.ReadRune()
+ if err != nil {
+ break
+ }
+ is.data = append(is.data, r)
+ }
+ is.size = len(is.data) // number of runes
+ return is
+}
+
+// NewInputStream creates a new input stream from the given string
+func NewInputStream(data string) *InputStream {
+
+ is := &InputStream{
+ name: "",
+ index: 0,
+ data: []rune(data), // This is actually the most efficient way
+ }
+ is.size = len(is.data) // number of runes, but we could also use len(data), which is efficient too
+ return is
+}
+
+func (is *InputStream) reset() {
+ is.index = 0
+}
+
+// Consume moves the input pointer to the next character in the input stream
+func (is *InputStream) Consume() {
+ if is.index >= is.size {
+ // assert is.LA(1) == TokenEOF
+ panic("cannot consume EOF")
+ }
+ is.index++
+}
+
+// LA returns the character at the given offset relative to the current position (LA(1) is the character at the cursor)
+func (is *InputStream) LA(offset int) int {
+
+ if offset == 0 {
+ return 0 // nil
+ }
+ if offset < 0 {
+ offset++ // e.g., translate LA(-1) to use offset=0
+ }
+ pos := is.index + offset - 1
+
+ if pos < 0 || pos >= is.size { // invalid
+ return TokenEOF
+ }
+
+ return int(is.data[pos])
+}
+
+// LT returns the character at the given offset relative to the current position; it is an alias for LA
+func (is *InputStream) LT(offset int) int {
+ return is.LA(offset)
+}
+
+// Index returns the current offset into the input stream
+func (is *InputStream) Index() int {
+ return is.index
+}
+
+// Size returns the total number of characters in the input stream
+func (is *InputStream) Size() int {
+ return is.size
+}
+
+// Mark does nothing here as we have entire buffer
+func (is *InputStream) Mark() int {
+ return -1
+}
+
+// Release does nothing here as we have entire buffer
+func (is *InputStream) Release(_ int) {
+}
+
+// Seek the input point to the provided index offset
+func (is *InputStream) Seek(index int) {
+ if index <= is.index {
+ is.index = index // just jump don't update stream state (line,...)
+ return
+ }
+ // seek forward
+ is.index = intMin(index, is.size)
+}
+
+// GetText returns the text from the input stream from the start to the stop index
+func (is *InputStream) GetText(start int, stop int) string {
+ if stop >= is.size {
+ stop = is.size - 1
+ }
+ if start >= is.size {
+ return ""
+ }
+
+ return string(is.data[start : stop+1])
+}
+
+// GetTextFromTokens returns the text from the input stream from the first character of the start token to the last
+// character of the stop token
+func (is *InputStream) GetTextFromTokens(start, stop Token) string {
+ if start != nil && stop != nil {
+ return is.GetTextFromInterval(NewInterval(start.GetTokenIndex(), stop.GetTokenIndex()))
+ }
+
+ return ""
+}
+
+func (is *InputStream) GetTextFromInterval(i Interval) string {
+ return is.GetText(i.Start, i.Stop)
+}
+
+func (*InputStream) GetSourceName() string {
+ return ""
+}
+
+// String returns the entire input stream as a string
+func (is *InputStream) String() string {
+ return string(is.data)
+}
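
A quick sketch of the cursor semantics implemented above: indexes are rune-based, LA is relative to the cursor, and GetText treats both bounds as inclusive.

```go
is := antlr.NewInputStream("héllo")
fmt.Println(is.Size())               // 5: runes, not bytes
fmt.Println(string(rune(is.LA(1))))  // "h": the rune at the cursor
is.Consume()
fmt.Println(string(rune(is.LA(1))))  // "é"
fmt.Println(string(rune(is.LA(-1)))) // "h": one rune behind the cursor
fmt.Println(is.GetText(1, 3))        // "éll": inclusive start and stop
```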
diff --git a/vendor/github.com/antlr4-go/antlr/v4/int_stream.go b/vendor/github.com/antlr4-go/antlr/v4/int_stream.go
new file mode 100644
index 000000000..4778878bd
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/int_stream.go
@@ -0,0 +1,16 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+type IntStream interface {
+ Consume()
+ LA(int) int
+ Mark() int
+ Release(marker int)
+ Index() int
+ Seek(index int)
+ Size() int
+ GetSourceName() string
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/interval_set.go b/vendor/github.com/antlr4-go/antlr/v4/interval_set.go
new file mode 100644
index 000000000..cc5066067
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/interval_set.go
@@ -0,0 +1,330 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "strconv"
+ "strings"
+)
+
+type Interval struct {
+ Start int
+ Stop int
+}
+
+// NewInterval creates a new interval with the given start and stop values.
+func NewInterval(start, stop int) Interval {
+ return Interval{
+ Start: start,
+ Stop: stop,
+ }
+}
+
+// Contains returns true if the given item is contained within the interval.
+func (i Interval) Contains(item int) bool {
+ return item >= i.Start && item < i.Stop
+}
+
+// String generates a string representation of the interval.
+func (i Interval) String() string {
+ if i.Start == i.Stop-1 {
+ return strconv.Itoa(i.Start)
+ }
+
+ return strconv.Itoa(i.Start) + ".." + strconv.Itoa(i.Stop-1)
+}
+
+// Length returns the length of the interval.
+func (i Interval) Length() int {
+ return i.Stop - i.Start
+}
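
Note the half-open convention established above: Stop is exclusive for Contains and Length, while String prints the inclusive upper bound. For example:

```go
iv := antlr.NewInterval(97, 100) // 'a' through 'c'; Stop is exclusive
fmt.Println(iv.Contains(99))  // true  ('c')
fmt.Println(iv.Contains(100)) // false (Stop itself is excluded)
fmt.Println(iv.Length())      // 3
fmt.Println(iv.String())      // "97..99": printed inclusively
```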
+
+// IntervalSet represents a collection of [Interval] values, which may be read-only.
+type IntervalSet struct {
+ intervals []Interval
+ readOnly bool
+}
+
+// NewIntervalSet creates a new empty, writable, interval set.
+func NewIntervalSet() *IntervalSet {
+
+ i := new(IntervalSet)
+
+ i.intervals = nil
+ i.readOnly = false
+
+ return i
+}
+
+func (i *IntervalSet) Equals(other *IntervalSet) bool {
+ if len(i.intervals) != len(other.intervals) {
+ return false
+ }
+
+ for k, v := range i.intervals {
+ if v.Start != other.intervals[k].Start || v.Stop != other.intervals[k].Stop {
+ return false
+ }
+ }
+
+ return true
+}
+
+func (i *IntervalSet) first() int {
+ if len(i.intervals) == 0 {
+ return TokenInvalidType
+ }
+
+ return i.intervals[0].Start
+}
+
+func (i *IntervalSet) addOne(v int) {
+ i.addInterval(NewInterval(v, v+1))
+}
+
+func (i *IntervalSet) addRange(l, h int) {
+ i.addInterval(NewInterval(l, h+1))
+}
+
+func (i *IntervalSet) addInterval(v Interval) {
+ if i.intervals == nil {
+ i.intervals = make([]Interval, 0)
+ i.intervals = append(i.intervals, v)
+ } else {
+ // find insert pos
+ for k, interval := range i.intervals {
+ // distinct range -> insert
+ if v.Stop < interval.Start {
+ i.intervals = append(i.intervals[0:k], append([]Interval{v}, i.intervals[k:]...)...)
+ return
+ } else if v.Stop == interval.Start {
+ i.intervals[k].Start = v.Start
+ return
+ } else if v.Start <= interval.Stop {
+ i.intervals[k] = NewInterval(intMin(interval.Start, v.Start), intMax(interval.Stop, v.Stop))
+
+ // if not applying to end, merge potential overlaps
+ if k < len(i.intervals)-1 {
+ l := i.intervals[k]
+ r := i.intervals[k+1]
+ // if r contained in l
+ if l.Stop >= r.Stop {
+ i.intervals = append(i.intervals[0:k+1], i.intervals[k+2:]...)
+ } else if l.Stop >= r.Start { // partial overlap
+ i.intervals[k] = NewInterval(l.Start, r.Stop)
+ i.intervals = append(i.intervals[0:k+1], i.intervals[k+2:]...)
+ }
+ }
+ return
+ }
+ }
+ // greater than any existing interval, so append
+ i.intervals = append(i.intervals, v)
+ }
+}
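
Since these mutators are unexported, the merging behavior is easiest to exercise from inside the antlr package (for example in a test). A sketch, assuming the toCharString allocation fix later in this file:

```go
s := NewIntervalSet()
s.addRange('a', 'c') // {'a'..'c'}
s.addOne('e')        // {'a'..'c', 'e'}
s.addRange('b', 'f') // overlaps both entries, which collapse into one
fmt.Println(s.StringVerbose(nil, nil, true)) // "'a'..'f'"
```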
+
+func (i *IntervalSet) addSet(other *IntervalSet) *IntervalSet {
+ if other.intervals != nil {
+ for k := 0; k < len(other.intervals); k++ {
+ i2 := other.intervals[k]
+ i.addInterval(NewInterval(i2.Start, i2.Stop))
+ }
+ }
+ return i
+}
+
+func (i *IntervalSet) complement(start int, stop int) *IntervalSet {
+ result := NewIntervalSet()
+ result.addInterval(NewInterval(start, stop+1))
+ for j := 0; j < len(i.intervals); j++ {
+ result.removeRange(i.intervals[j])
+ }
+ return result
+}
+
+func (i *IntervalSet) contains(item int) bool {
+ if i.intervals == nil {
+ return false
+ }
+ for k := 0; k < len(i.intervals); k++ {
+ if i.intervals[k].Contains(item) {
+ return true
+ }
+ }
+ return false
+}
+
+func (i *IntervalSet) length() int {
+ iLen := 0
+
+ for _, v := range i.intervals {
+ iLen += v.Length()
+ }
+
+ return iLen
+}
+
+func (i *IntervalSet) removeRange(v Interval) {
+ if v.Start == v.Stop-1 {
+ i.removeOne(v.Start)
+ } else if i.intervals != nil {
+ k := 0
+ for n := 0; n < len(i.intervals); n++ {
+ ni := i.intervals[k]
+ // intervals are ordered
+ if v.Stop <= ni.Start {
+ return
+ } else if v.Start > ni.Start && v.Stop < ni.Stop {
+ i.intervals[k] = NewInterval(ni.Start, v.Start)
+ x := NewInterval(v.Stop, ni.Stop)
+ // i.intervals.splice(k, 0, x)
+ i.intervals = append(i.intervals[0:k], append([]Interval{x}, i.intervals[k:]...)...)
+ return
+ } else if v.Start <= ni.Start && v.Stop >= ni.Stop {
+ // i.intervals.splice(k, 1)
+ i.intervals = append(i.intervals[0:k], i.intervals[k+1:]...)
+ k = k - 1 // need another pass
+ } else if v.Start < ni.Stop {
+ i.intervals[k] = NewInterval(ni.Start, v.Start)
+ } else if v.Stop < ni.Stop {
+ i.intervals[k] = NewInterval(v.Stop, ni.Stop)
+ }
+ k++
+ }
+ }
+}
+
+func (i *IntervalSet) removeOne(v int) {
+ if i.intervals != nil {
+ for k := 0; k < len(i.intervals); k++ {
+ ki := i.intervals[k]
+ // intervals are ordered
+ if v < ki.Start {
+ return
+ } else if v == ki.Start && v == ki.Stop-1 {
+ // i.intervals.splice(k, 1)
+ i.intervals = append(i.intervals[0:k], i.intervals[k+1:]...)
+ return
+ } else if v == ki.Start {
+ i.intervals[k] = NewInterval(ki.Start+1, ki.Stop)
+ return
+ } else if v == ki.Stop-1 {
+ i.intervals[k] = NewInterval(ki.Start, ki.Stop-1)
+ return
+ } else if v < ki.Stop-1 {
+ x := NewInterval(ki.Start, v)
+ ki.Start = v + 1
+ // i.intervals.splice(k, 0, x)
+ i.intervals = append(i.intervals[0:k], append([]Interval{x}, i.intervals[k:]...)...)
+ return
+ }
+ }
+ }
+}
+
+func (i *IntervalSet) String() string {
+ return i.StringVerbose(nil, nil, false)
+}
+
+func (i *IntervalSet) StringVerbose(literalNames []string, symbolicNames []string, elemsAreChar bool) string {
+
+ if i.intervals == nil {
+ return "{}"
+ } else if literalNames != nil || symbolicNames != nil {
+ return i.toTokenString(literalNames, symbolicNames)
+ } else if elemsAreChar {
+ return i.toCharString()
+ }
+
+ return i.toIndexString()
+}
+
+func (i *IntervalSet) GetIntervals() []Interval {
+ return i.intervals
+}
+
+func (i *IntervalSet) toCharString() string {
+ names := make([]string, 0, len(i.intervals))
+
+ var sb strings.Builder
+
+ for j := 0; j < len(i.intervals); j++ {
+ v := i.intervals[j]
+ if v.Stop == v.Start+1 {
+ if v.Start == TokenEOF {
+ names = append(names, "")
+ } else {
+ sb.WriteByte('\'')
+ sb.WriteRune(rune(v.Start))
+ sb.WriteByte('\'')
+ names = append(names, sb.String())
+ sb.Reset()
+ }
+ } else {
+ sb.WriteByte('\'')
+ sb.WriteRune(rune(v.Start))
+ sb.WriteString("'..'")
+ sb.WriteRune(rune(v.Stop - 1))
+ sb.WriteByte('\'')
+ names = append(names, sb.String())
+ sb.Reset()
+ }
+ }
+ if len(names) > 1 {
+ return "{" + strings.Join(names, ", ") + "}"
+ }
+
+ return names[0]
+}
+
+func (i *IntervalSet) toIndexString() string {
+
+ names := make([]string, 0)
+ for j := 0; j < len(i.intervals); j++ {
+ v := i.intervals[j]
+ if v.Stop == v.Start+1 {
+ if v.Start == TokenEOF {
+ names = append(names, "")
+ } else {
+ names = append(names, strconv.Itoa(v.Start))
+ }
+ } else {
+ names = append(names, strconv.Itoa(v.Start)+".."+strconv.Itoa(v.Stop-1))
+ }
+ }
+ if len(names) > 1 {
+ return "{" + strings.Join(names, ", ") + "}"
+ }
+
+ return names[0]
+}
+
+func (i *IntervalSet) toTokenString(literalNames []string, symbolicNames []string) string {
+ names := make([]string, 0)
+ for _, v := range i.intervals {
+ for j := v.Start; j < v.Stop; j++ {
+ names = append(names, i.elementName(literalNames, symbolicNames, j))
+ }
+ }
+ if len(names) > 1 {
+ return "{" + strings.Join(names, ", ") + "}"
+ }
+
+ return names[0]
+}
+
+func (i *IntervalSet) elementName(literalNames []string, symbolicNames []string, a int) string {
+ if a == TokenEOF {
+ return "<EOF>"
+ } else if a == TokenEpsilon {
+ return "<EPSILON>"
+ } else {
+ if a < len(literalNames) && literalNames[a] != "" {
+ return literalNames[a]
+ }
+
+ return symbolicNames[a]
+ }
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/jcollect.go b/vendor/github.com/antlr4-go/antlr/v4/jcollect.go
new file mode 100644
index 000000000..ceccd96d2
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/jcollect.go
@@ -0,0 +1,685 @@
+package antlr
+
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+import (
+ "container/list"
+ "runtime/debug"
+ "sort"
+ "sync"
+)
+
+// Collectable is an interface that a struct should implement if it is to be
+// usable as a key in these collections.
+type Collectable[T any] interface {
+ Hash() int
+ Equals(other Collectable[T]) bool
+}
+
+type Comparator[T any] interface {
+ Hash1(o T) int
+ Equals2(T, T) bool
+}
+
+type CollectionSource int
+type CollectionDescriptor struct {
+ SybolicName string
+ Description string
+}
+
+const (
+ UnknownCollection CollectionSource = iota
+ ATNConfigLookupCollection
+ ATNStateCollection
+ DFAStateCollection
+ ATNConfigCollection
+ PredictionContextCollection
+ SemanticContextCollection
+ ClosureBusyCollection
+ PredictionVisitedCollection
+ MergeCacheCollection
+ PredictionContextCacheCollection
+ AltSetCollection
+ ReachSetCollection
+)
+
+var CollectionDescriptors = map[CollectionSource]CollectionDescriptor{
+ UnknownCollection: {
+ SybolicName: "UnknownCollection",
+ Description: "Unknown collection type. Only used if the target author thought it was an unimportant collection.",
+ },
+ ATNConfigCollection: {
+ SybolicName: "ATNConfigCollection",
+ Description: "ATNConfig collection. Used to store the ATNConfigs for a particular state in the ATN." +
+ "For instance, it is used to store the results of the closure() operation in the ATN.",
+ },
+ ATNConfigLookupCollection: {
+ SybolicName: "ATNConfigLookupCollection",
+ Description: "ATNConfigLookup collection. Used to store the ATNConfigs for a particular state in the ATN." +
+ "This is used to prevent duplicating equivalent states in an ATNConfigurationSet.",
+ },
+ ATNStateCollection: {
+ SybolicName: "ATNStateCollection",
+ Description: "ATNState collection. This is used to store the states of the ATN.",
+ },
+ DFAStateCollection: {
+ SybolicName: "DFAStateCollection",
+ Description: "DFAState collection. This is used to store the states of the DFA.",
+ },
+ PredictionContextCollection: {
+ SybolicName: "PredictionContextCollection",
+ Description: "PredictionContext collection. This is used to store the prediction contexts of the ATN and cache computes.",
+ },
+ SemanticContextCollection: {
+ SybolicName: "SemanticContextCollection",
+ Description: "SemanticContext collection. This is used to store the semantic contexts of the ATN.",
+ },
+ ClosureBusyCollection: {
+ SybolicName: "ClosureBusyCollection",
+ Description: "ClosureBusy collection. This is used to check and prevent infinite recursion right recursive rules." +
+ "It stores ATNConfigs that are currently being processed in the closure() operation.",
+ },
+ PredictionVisitedCollection: {
+ SybolicName: "PredictionVisitedCollection",
+ Description: "A map that records whether we have visited a particular context when searching through cached entries.",
+ },
+ MergeCacheCollection: {
+ SybolicName: "MergeCacheCollection",
+ Description: "A map that records whether we have already merged two particular contexts and can save effort by not repeating it.",
+ },
+ PredictionContextCacheCollection: {
+ SybolicName: "PredictionContextCacheCollection",
+ Description: "A map that records whether we have already created a particular context and can save effort by not computing it again.",
+ },
+ AltSetCollection: {
+ SybolicName: "AltSetCollection",
+ Description: "Used to eliminate duplicate alternatives in an ATN config set.",
+ },
+ ReachSetCollection: {
+ SybolicName: "ReachSetCollection",
+ Description: "Used as merge cache to prevent us needing to compute the merge of two states if we have already done it.",
+ },
+}
+
+// JStore implements a container that allows the use of a struct to calculate the key
+// for a collection of values akin to map. This is not meant to be a full-blown HashMap but just
+// serve the needs of the ANTLR Go runtime.
+//
+// For ease of porting the logic of the runtime from the master target (Java), this collection
+// operates in a similar way to Java, in that it can use any struct that supplies a Hash() and Equals()
+// function as the key. The values are stored in a standard go map which internally is a form of hashmap
+// itself, the key for the go map is the hash supplied by the key object. The collection is able to deal with
+// hash conflicts by using a simple slice of values associated with the hash code indexed bucket. That isn't
+// particularly efficient, but it is simple, and it works. As this is specifically for the ANTLR runtime, and
+// we understand the requirements, then this is fine - this is not a general purpose collection.
+type JStore[T any, C Comparator[T]] struct {
+ store map[int][]T
+ len int
+ comparator Comparator[T]
+ stats *JStatRec
+}
+
+func NewJStore[T any, C Comparator[T]](comparator Comparator[T], cType CollectionSource, desc string) *JStore[T, C] {
+
+ if comparator == nil {
+ panic("comparator cannot be nil")
+ }
+
+ s := &JStore[T, C]{
+ store: make(map[int][]T, 1),
+ comparator: comparator,
+ }
+ if collectStats {
+ s.stats = &JStatRec{
+ Source: cType,
+ Description: desc,
+ }
+
+ // Track where we created it from if we are being asked to do so
+ if runtimeConfig.statsTraceStacks {
+ s.stats.CreateStack = debug.Stack()
+ }
+ Statistics.AddJStatRec(s.stats)
+ }
+ return s
+}
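
A sketch of the Hash1/Equals2 contract from the consumer side: any comparator that satisfies Comparator[T] turns JStore into a deduplicating set. `runeEq` is an illustrative comparator, not part of the runtime:

```go
type runeEq struct{}

func (runeEq) Hash1(o rune) int       { return int(o) }
func (runeEq) Equals2(a, b rune) bool { return a == b }

func demo() {
	s := antlr.NewJStore[rune, runeEq](runeEq{}, antlr.UnknownCollection, "demo store")
	first, existed := s.Put('x') // stored; existed == false
	again, dup := s.Put('x')     // deduplicated; dup == true
	fmt.Println(existed, dup, first == again) // false true true
	fmt.Println(s.Len())                      // 1
}
```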
+
+// Put will store given value in the collection. Note that the key for storage is generated from
+// the value itself - this is specifically because that is what ANTLR needs - this would not be useful
+// as any kind of general collection.
+//
+// If the key has a hash conflict, then the value will be added to the slice of values associated with the
+// hash, unless the value is already in the slice, in which case the existing value is returned. Value equivalence is
+// tested by calling the equals() method on the key.
+//
+// If the given value is already present in the store, then the existing value is returned as v and exists is set to true.
+//
+// If the given value is not present in the store, then the value is added to the store and returned as v and exists is set to false.
+func (s *JStore[T, C]) Put(value T) (v T, exists bool) {
+
+ if collectStats {
+ s.stats.Puts++
+ }
+ kh := s.comparator.Hash1(value)
+
+ var hClash bool
+ for _, v1 := range s.store[kh] {
+ hClash = true
+ if s.comparator.Equals2(value, v1) {
+ if collectStats {
+ s.stats.PutHits++
+ s.stats.PutHashConflicts++
+ }
+ return v1, true
+ }
+ if collectStats {
+ s.stats.PutMisses++
+ }
+ }
+ if collectStats && hClash {
+ s.stats.PutHashConflicts++
+ }
+ s.store[kh] = append(s.store[kh], value)
+
+ if collectStats {
+ if len(s.store[kh]) > s.stats.MaxSlotSize {
+ s.stats.MaxSlotSize = len(s.store[kh])
+ }
+ }
+ s.len++
+ if collectStats {
+ s.stats.CurSize = s.len
+ if s.len > s.stats.MaxSize {
+ s.stats.MaxSize = s.len
+ }
+ }
+ return value, false
+}
+
+// Get will return the value associated with the key - the type of the key is the same type as the value
+// which would not generally be useful, but this is a specific thing for ANTLR where the key is
+// generated using the object we are going to store.
+func (s *JStore[T, C]) Get(key T) (T, bool) {
+ if collectStats {
+ s.stats.Gets++
+ }
+ kh := s.comparator.Hash1(key)
+ var hClash bool
+ for _, v := range s.store[kh] {
+ hClash = true
+ if s.comparator.Equals2(key, v) {
+ if collectStats {
+ s.stats.GetHits++
+ s.stats.GetHashConflicts++
+ }
+ return v, true
+ }
+ if collectStats {
+ s.stats.GetMisses++
+ }
+ }
+ if collectStats {
+ if hClash {
+ s.stats.GetHashConflicts++
+ }
+ s.stats.GetNoEnt++
+ }
+ return key, false
+}
+
+// Contains returns true if the given key is present in the store
+func (s *JStore[T, C]) Contains(key T) bool {
+ _, present := s.Get(key)
+ return present
+}
+
+func (s *JStore[T, C]) SortedSlice(less func(i, j T) bool) []T {
+ vs := make([]T, 0, len(s.store))
+ for _, v := range s.store {
+ vs = append(vs, v...)
+ }
+ sort.Slice(vs, func(i, j int) bool {
+ return less(vs[i], vs[j])
+ })
+
+ return vs
+}
+
+func (s *JStore[T, C]) Each(f func(T) bool) {
+ for _, e := range s.store {
+ for _, v := range e {
+ f(v)
+ }
+ }
+}
+
+func (s *JStore[T, C]) Len() int {
+ return s.len
+}
+
+func (s *JStore[T, C]) Values() []T {
+ vs := make([]T, 0, len(s.store))
+ for _, e := range s.store {
+ vs = append(vs, e...)
+ }
+ return vs
+}
+
+type entry[K, V any] struct {
+ key K
+ val V
+}
+
+type JMap[K, V any, C Comparator[K]] struct {
+ store map[int][]*entry[K, V]
+ len int
+ comparator Comparator[K]
+ stats *JStatRec
+}
+
+func NewJMap[K, V any, C Comparator[K]](comparator Comparator[K], cType CollectionSource, desc string) *JMap[K, V, C] {
+ m := &JMap[K, V, C]{
+ store: make(map[int][]*entry[K, V], 1),
+ comparator: comparator,
+ }
+ if collectStats {
+ m.stats = &JStatRec{
+ Source: cType,
+ Description: desc,
+ }
+ // Track where we created it from if we are being asked to do so
+ if runtimeConfig.statsTraceStacks {
+ m.stats.CreateStack = debug.Stack()
+ }
+ Statistics.AddJStatRec(m.stats)
+ }
+ return m
+}
+
+func (m *JMap[K, V, C]) Put(key K, val V) (V, bool) {
+ if collectStats {
+ m.stats.Puts++
+ }
+ kh := m.comparator.Hash1(key)
+
+ var hClash bool
+ for _, e := range m.store[kh] {
+ hClash = true
+ if m.comparator.Equals2(e.key, key) {
+ if collectStats {
+ m.stats.PutHits++
+ m.stats.PutHashConflicts++
+ }
+ return e.val, true
+ }
+ if collectStats {
+ m.stats.PutMisses++
+ }
+ }
+ if collectStats {
+ if hClash {
+ m.stats.PutHashConflicts++
+ }
+ }
+ m.store[kh] = append(m.store[kh], &entry[K, V]{key, val})
+ if collectStats {
+ if len(m.store[kh]) > m.stats.MaxSlotSize {
+ m.stats.MaxSlotSize = len(m.store[kh])
+ }
+ }
+ m.len++
+ if collectStats {
+ m.stats.CurSize = m.len
+ if m.len > m.stats.MaxSize {
+ m.stats.MaxSize = m.len
+ }
+ }
+ return val, false
+}
+
+func (m *JMap[K, V, C]) Values() []V {
+ vs := make([]V, 0, len(m.store))
+ for _, e := range m.store {
+ for _, v := range e {
+ vs = append(vs, v.val)
+ }
+ }
+ return vs
+}
+
+func (m *JMap[K, V, C]) Get(key K) (V, bool) {
+ if collectStats {
+ m.stats.Gets++
+ }
+ var none V
+ kh := m.comparator.Hash1(key)
+ var hClash bool
+ for _, e := range m.store[kh] {
+ hClash = true
+ if m.comparator.Equals2(e.key, key) {
+ if collectStats {
+ m.stats.GetHits++
+ m.stats.GetHashConflicts++
+ }
+ return e.val, true
+ }
+ if collectStats {
+ m.stats.GetMisses++
+ }
+ }
+ if collectStats {
+ if hClash {
+ m.stats.GetHashConflicts++
+ }
+ m.stats.GetNoEnt++
+ }
+ return none, false
+}
+
+func (m *JMap[K, V, C]) Len() int {
+ return m.len
+}
+
+func (m *JMap[K, V, C]) Delete(key K) {
+ kh := m.comparator.Hash1(key)
+ for i, e := range m.store[kh] {
+ if m.comparator.Equals2(e.key, key) {
+ m.store[kh] = append(m.store[kh][:i], m.store[kh][i+1:]...)
+ m.len--
+ return
+ }
+ }
+}
+
+func (m *JMap[K, V, C]) Clear() {
+ m.store = make(map[int][]*entry[K, V])
+}
+
+type JPCMap struct {
+ store *JMap[*PredictionContext, *JMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]], *ObjEqComparator[*PredictionContext]]
+ size int
+ stats *JStatRec
+}
+
+func NewJPCMap(cType CollectionSource, desc string) *JPCMap {
+ m := &JPCMap{
+ store: NewJMap[*PredictionContext, *JMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]], *ObjEqComparator[*PredictionContext]](pContextEqInst, cType, desc),
+ }
+ if collectStats {
+ m.stats = &JStatRec{
+ Source: cType,
+ Description: desc,
+ }
+ // Track where we created it from if we are being asked to do so
+ if runtimeConfig.statsTraceStacks {
+ m.stats.CreateStack = debug.Stack()
+ }
+ Statistics.AddJStatRec(m.stats)
+ }
+ return m
+}
+
+func (pcm *JPCMap) Get(k1, k2 *PredictionContext) (*PredictionContext, bool) {
+ if collectStats {
+ pcm.stats.Gets++
+ }
+ // Do we have a map stored by k1?
+ //
+ m2, present := pcm.store.Get(k1)
+ if present {
+ if collectStats {
+ pcm.stats.GetHits++
+ }
+ // We found a map of values corresponding to k1, so now we need to look up k2 in that map
+ //
+ return m2.Get(k2)
+ }
+ if collectStats {
+ pcm.stats.GetMisses++
+ }
+ return nil, false
+}
+
+func (pcm *JPCMap) Put(k1, k2, v *PredictionContext) {
+
+ if collectStats {
+ pcm.stats.Puts++
+ }
+ // First does a map already exist for k1?
+ //
+ if m2, present := pcm.store.Get(k1); present {
+ if collectStats {
+ pcm.stats.PutHits++
+ }
+ _, present = m2.Put(k2, v)
+ if !present {
+ pcm.size++
+ if collectStats {
+ pcm.stats.CurSize = pcm.size
+ if pcm.size > pcm.stats.MaxSize {
+ pcm.stats.MaxSize = pcm.size
+ }
+ }
+ }
+ } else {
+ // No map found for k1, so we create it, add in our value, then store it
+ //
+ if collectStats {
+ pcm.stats.PutMisses++
+ m2 = NewJMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]](pContextEqInst, pcm.stats.Source, pcm.stats.Description+" map entry")
+ } else {
+ m2 = NewJMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]](pContextEqInst, PredictionContextCacheCollection, "map entry")
+ }
+
+ m2.Put(k2, v)
+ pcm.store.Put(k1, m2)
+ pcm.size++
+ }
+}
+
+type JPCMap2 struct {
+ store map[int][]JPCEntry
+ size int
+ stats *JStatRec
+}
+
+type JPCEntry struct {
+ k1, k2, v *PredictionContext
+}
+
+func NewJPCMap2(cType CollectionSource, desc string) *JPCMap2 {
+ m := &JPCMap2{
+ store: make(map[int][]JPCEntry, 1000),
+ }
+ if collectStats {
+ m.stats = &JStatRec{
+ Source: cType,
+ Description: desc,
+ }
+ // Track where we created it from if we are being asked to do so
+ if runtimeConfig.statsTraceStacks {
+ m.stats.CreateStack = debug.Stack()
+ }
+ Statistics.AddJStatRec(m.stats)
+ }
+ return m
+}
+
+func dHash(k1, k2 *PredictionContext) int {
+ return k1.cachedHash*31 + k2.cachedHash
+}
+
+func (pcm *JPCMap2) Get(k1, k2 *PredictionContext) (*PredictionContext, bool) {
+ if collectStats {
+ pcm.stats.Gets++
+ }
+
+ h := dHash(k1, k2)
+ var hClash bool
+ for _, e := range pcm.store[h] {
+ hClash = true
+ if e.k1.Equals(k1) && e.k2.Equals(k2) {
+ if collectStats {
+ pcm.stats.GetHits++
+ pcm.stats.GetHashConflicts++
+ }
+ return e.v, true
+ }
+ if collectStats {
+ pcm.stats.GetMisses++
+ }
+ }
+ if collectStats {
+ if hClash {
+ pcm.stats.GetHashConflicts++
+ }
+ pcm.stats.GetNoEnt++
+ }
+ return nil, false
+}
+
+func (pcm *JPCMap2) Put(k1, k2, v *PredictionContext) (*PredictionContext, bool) {
+ if collectStats {
+ pcm.stats.Puts++
+ }
+ h := dHash(k1, k2)
+ var hClash bool
+ for _, e := range pcm.store[h] {
+ hClash = true
+ if e.k1.Equals(k1) && e.k2.Equals(k2) {
+ if collectStats {
+ pcm.stats.PutHits++
+ pcm.stats.PutHashConflicts++
+ }
+ return e.v, true
+ }
+ if collectStats {
+ pcm.stats.PutMisses++
+ }
+ }
+ if collectStats {
+ if hClash {
+ pcm.stats.PutHashConflicts++
+ }
+ }
+ pcm.store[h] = append(pcm.store[h], JPCEntry{k1, k2, v})
+ pcm.size++
+ if collectStats {
+ pcm.stats.CurSize = pcm.size
+ if pcm.size > pcm.stats.MaxSize {
+ pcm.stats.MaxSize = pcm.size
+ }
+ }
+ return nil, false
+}
+
+type VisitEntry struct {
+ k *PredictionContext
+ v *PredictionContext
+}
+type VisitRecord struct {
+ store map[*PredictionContext]*PredictionContext
+ len int
+ stats *JStatRec
+}
+
+type VisitList struct {
+ cache *list.List
+ lock sync.RWMutex
+}
+
+var visitListPool = VisitList{
+ cache: list.New(),
+ lock: sync.RWMutex{},
+}
+
+// NewVisitRecord returns a new VisitRecord instance from the pool if available.
+// Note that this "map" uses a pointer as a key because we are emulating the behavior of
+// IdentityHashMap in Java, which uses the `==` operator to compare keys: it asks
+// whether the key is the same object reference, rather than whether it is
+// .equals() to another object.
+func NewVisitRecord() *VisitRecord {
+ visitListPool.lock.Lock()
+ el := visitListPool.cache.Front()
+ defer visitListPool.lock.Unlock()
+ var vr *VisitRecord
+ if el == nil {
+ vr = &VisitRecord{
+ store: make(map[*PredictionContext]*PredictionContext),
+ }
+ if collectStats {
+ vr.stats = &JStatRec{
+ Source: PredictionContextCacheCollection,
+ Description: "VisitRecord",
+ }
+ // Track where we created it from if we are being asked to do so
+ if runtimeConfig.statsTraceStacks {
+ vr.stats.CreateStack = debug.Stack()
+ }
+ }
+ } else {
+ vr = el.Value.(*VisitRecord)
+ visitListPool.cache.Remove(el)
+ vr.store = make(map[*PredictionContext]*PredictionContext)
+ }
+ if collectStats {
+ Statistics.AddJStatRec(vr.stats)
+ }
+ return vr
+}
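+
+// Usage sketch (illustrative only; ctx and merged stand in for hypothetical
+// *PredictionContext values produced during context merging):
+//
+//	vr := NewVisitRecord()
+//	defer vr.Release() // hand the record back to visitListPool for reuse
+//	vr.Put(ctx, merged)
+//	if v, ok := vr.Get(ctx); ok {
+//	    _ = v // a hit requires the same pointer, i.e. identity semantics
+//	}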
+
+func (vr *VisitRecord) Release() {
+ vr.len = 0
+ vr.store = nil
+ if collectStats {
+ vr.stats.MaxSize = 0
+ vr.stats.CurSize = 0
+ vr.stats.Gets = 0
+ vr.stats.GetHits = 0
+ vr.stats.GetMisses = 0
+ vr.stats.GetHashConflicts = 0
+ vr.stats.GetNoEnt = 0
+ vr.stats.Puts = 0
+ vr.stats.PutHits = 0
+ vr.stats.PutMisses = 0
+ vr.stats.PutHashConflicts = 0
+ vr.stats.MaxSlotSize = 0
+ }
+ visitListPool.lock.Lock()
+ visitListPool.cache.PushBack(vr)
+ visitListPool.lock.Unlock()
+}
+
+func (vr *VisitRecord) Get(k *PredictionContext) (*PredictionContext, bool) {
+ if collectStats {
+ vr.stats.Gets++
+ }
+ v := vr.store[k]
+ if v != nil {
+ if collectStats {
+ vr.stats.GetHits++
+ }
+ return v, true
+ }
+ if collectStats {
+ vr.stats.GetNoEnt++
+ }
+ return nil, false
+}
+
+func (vr *VisitRecord) Put(k, v *PredictionContext) (*PredictionContext, bool) {
+ if collectStats {
+ vr.stats.Puts++
+ }
+ vr.store[k] = v
+ vr.len++
+ if collectStats {
+ vr.stats.CurSize = vr.len
+ if vr.len > vr.stats.MaxSize {
+ vr.stats.MaxSize = vr.len
+ }
+ }
+ return v, false
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/lexer.go b/vendor/github.com/antlr4-go/antlr/v4/lexer.go
new file mode 100644
index 000000000..3c7896a91
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/lexer.go
@@ -0,0 +1,426 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// A lexer is a recognizer that draws input symbols from a character stream.
+// Lexer grammars result in a subclass of this object. A Lexer object
+// uses simplified Match() and error recovery mechanisms in the interest
+// of speed.
+
+type Lexer interface {
+ TokenSource
+ Recognizer
+
+ Emit() Token
+
+ SetChannel(int)
+ PushMode(int)
+ PopMode() int
+ SetType(int)
+ SetMode(int)
+}
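+
+// Generated lexers typically satisfy this interface by embedding *BaseLexer
+// (defined below). A minimal hand-rolled sketch, with MyLexer hypothetical:
+//
+//	type MyLexer struct {
+//	    *BaseLexer
+//	}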
+
+type BaseLexer struct {
+ *BaseRecognizer
+
+ Interpreter ILexerATNSimulator
+ TokenStartCharIndex int
+ TokenStartLine int
+ TokenStartColumn int
+ ActionType int
+ Virt Lexer // The most derived lexer implementation. Allows virtual method calls.
+
+ input CharStream
+ factory TokenFactory
+ tokenFactorySourcePair *TokenSourceCharStreamPair
+ token Token
+ hitEOF bool
+ channel int
+ thetype int
+ modeStack IntStack
+ mode int
+ text string
+}
+
+func NewBaseLexer(input CharStream) *BaseLexer {
+
+ lexer := new(BaseLexer)
+
+ lexer.BaseRecognizer = NewBaseRecognizer()
+
+ lexer.input = input
+ lexer.factory = CommonTokenFactoryDEFAULT
+ lexer.tokenFactorySourcePair = &TokenSourceCharStreamPair{lexer, input}
+
+ lexer.Virt = lexer
+
+ lexer.Interpreter = nil // child classes must populate it
+
+ // The goal of all lexer rules/methods is to create a token object.
+ // This is an instance variable as multiple rules may collaborate to
+ // create a single token. NextToken will return this object after
+ // Matching lexer rule(s). If you subclass to allow multiple token
+ // emissions, then set this to the last token to be Matched, or to
+ // something non-nil, so that the auto token emit mechanism will not
+ // emit another token.
+ lexer.token = nil
+
+ // What character index in the stream did the current token start at?
+ // Needed, for example, to get the text for current token. Set at
+ // the start of NextToken.
+ lexer.TokenStartCharIndex = -1
+
+ // The line on which the first character of the token resides
+ lexer.TokenStartLine = -1
+
+ // The character position of the first character within the line
+ lexer.TokenStartColumn = -1
+
+ // Once we see EOF on char stream, next token will be EOF.
+ // If you have DONE : EOF then you see DONE EOF.
+ lexer.hitEOF = false
+
+ // The channel number for the current token
+ lexer.channel = TokenDefaultChannel
+
+ // The token type for the current token
+ lexer.thetype = TokenInvalidType
+
+ lexer.modeStack = make([]int, 0)
+ lexer.mode = LexerDefaultMode
+
+ // You can set the text for the current token to override what is in
+ // the input char buffer, either via SetText() or by setting this
+ // instance variable directly.
+ lexer.text = ""
+
+ return lexer
+}
+
+const (
+ LexerDefaultMode = 0
+ LexerMore = -2
+ LexerSkip = -3
+)
+
+//goland:noinspection GoUnusedConst
+const (
+ LexerDefaultTokenChannel = TokenDefaultChannel
+ LexerHidden = TokenHiddenChannel
+ LexerMinCharValue = 0x0000
+ LexerMaxCharValue = 0x10FFFF
+)
+
+func (b *BaseLexer) Reset() {
+ // reset all Lexer state variables
+ if b.input != nil {
+ b.input.Seek(0) // rewind the input
+ }
+ b.token = nil
+ b.thetype = TokenInvalidType
+ b.channel = TokenDefaultChannel
+ b.TokenStartCharIndex = -1
+ b.TokenStartColumn = -1
+ b.TokenStartLine = -1
+ b.text = ""
+
+ b.hitEOF = false
+ b.mode = LexerDefaultMode
+ b.modeStack = make([]int, 0)
+
+ b.Interpreter.reset()
+}
+
+func (b *BaseLexer) GetInterpreter() ILexerATNSimulator {
+ return b.Interpreter
+}
+
+func (b *BaseLexer) GetInputStream() CharStream {
+ return b.input
+}
+
+func (b *BaseLexer) GetSourceName() string {
+ return b.GrammarFileName
+}
+
+func (b *BaseLexer) SetChannel(v int) {
+ b.channel = v
+}
+
+func (b *BaseLexer) GetTokenFactory() TokenFactory {
+ return b.factory
+}
+
+func (b *BaseLexer) setTokenFactory(f TokenFactory) {
+ b.factory = f
+}
+
+func (b *BaseLexer) safeMatch() (ret int) {
+ defer func() {
+ if e := recover(); e != nil {
+ if re, ok := e.(RecognitionException); ok {
+ b.notifyListeners(re) // Report error
+ b.Recover(re)
+ ret = LexerSkip // default
+ }
+ }
+ }()
+
+ return b.Interpreter.Match(b.input, b.mode)
+}
+
+// NextToken returns a token from the lexer input source, i.e., it Matches a token on the source char stream.
+func (b *BaseLexer) NextToken() Token {
+ if b.input == nil {
+ panic("NextToken requires a non-nil input stream.")
+ }
+
+ tokenStartMarker := b.input.Mark()
+
+ // previously in finally block
+ defer func() {
+ // make sure we release marker after Match or
+ // unbuffered char stream will keep buffering
+ b.input.Release(tokenStartMarker)
+ }()
+
+ for {
+ if b.hitEOF {
+ b.EmitEOF()
+ return b.token
+ }
+ b.token = nil
+ b.channel = TokenDefaultChannel
+ b.TokenStartCharIndex = b.input.Index()
+ b.TokenStartColumn = b.Interpreter.GetCharPositionInLine()
+ b.TokenStartLine = b.Interpreter.GetLine()
+ b.text = ""
+ continueOuter := false
+ for {
+ b.thetype = TokenInvalidType
+
+ ttype := b.safeMatch()
+
+ if b.input.LA(1) == TokenEOF {
+ b.hitEOF = true
+ }
+ if b.thetype == TokenInvalidType {
+ b.thetype = ttype
+ }
+ if b.thetype == LexerSkip {
+ continueOuter = true
+ break
+ }
+ if b.thetype != LexerMore {
+ break
+ }
+ }
+
+ if continueOuter {
+ continue
+ }
+ if b.token == nil {
+ b.Virt.Emit()
+ }
+ return b.token
+ }
+}
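+
+// Token-pump sketch (illustrative; MyLexer is a hypothetical generated lexer):
+//
+//	input := NewInputStream("1 + 2")
+//	lex := NewMyLexer(input)
+//	for tok := lex.NextToken(); tok.GetTokenType() != TokenEOF; tok = lex.NextToken() {
+//	    fmt.Println(tok.GetText())
+//	}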
+
+// Skip instructs the lexer to Skip creating a token for current lexer rule
+// and look for another token. [NextToken] knows to keep looking when
+// a lexer rule finishes with token set to [SKIPTOKEN]. Recall that
+// if token==nil at end of any token rule, it creates one for you
+// and emits it.
+func (b *BaseLexer) Skip() {
+ b.thetype = LexerSkip
+}
+
+func (b *BaseLexer) More() {
+ b.thetype = LexerMore
+}
+
+// SetMode changes the lexer to a new mode. The lexer will use this mode from this point
+// on, and the rules for that mode will be in force.
+func (b *BaseLexer) SetMode(m int) {
+ b.mode = m
+}
+
+// PushMode saves the current lexer mode so that it can be restored later (see [PopMode]),
+// then sets the current lexer mode to the supplied mode m.
+func (b *BaseLexer) PushMode(m int) {
+ if runtimeConfig.lexerATNSimulatorDebug {
+ fmt.Println("pushMode " + strconv.Itoa(m))
+ }
+ b.modeStack.Push(b.mode)
+ b.mode = m
+}
+
+// PopMode restores the lexer mode saved by a call to [PushMode]. It is a panic error if there is no saved mode to
+// return to.
+func (b *BaseLexer) PopMode() int {
+ if len(b.modeStack) == 0 {
+ panic("Empty Stack")
+ }
+ if runtimeConfig.lexerATNSimulatorDebug {
+ fmt.Println("popMode back to " + fmt.Sprint(b.modeStack[0:len(b.modeStack)-1]))
+ }
+ i, _ := b.modeStack.Pop()
+ b.mode = i
+ return b.mode
+}
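+
+// Mode-stack sketch (illustrative, as called from a generated lexer action;
+// commentMode is a hypothetical grammar mode constant):
+//
+//	b.PushMode(commentMode) // save the current mode and switch to commentMode
+//	// ... match the comment body ...
+//	b.PopMode()             // restore the mode in force before PushMode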
+
+func (b *BaseLexer) inputStream() CharStream {
+ return b.input
+}
+
+// SetInputStream resets the lexer input stream and associated lexer state.
+func (b *BaseLexer) SetInputStream(input CharStream) {
+ b.input = nil
+ b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input}
+ b.Reset()
+ b.input = input
+ b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input}
+}
+
+func (b *BaseLexer) GetTokenSourceCharStreamPair() *TokenSourceCharStreamPair {
+ return b.tokenFactorySourcePair
+}
+
+// EmitToken by default does not support multiple emits per [NextToken] invocation
+// for efficiency reasons. Subclass and override this func, [NextToken],
+// and [GetToken] (to push tokens into a list and pull from that list
+// rather than a single variable as this implementation does).
+func (b *BaseLexer) EmitToken(token Token) {
+ b.token = token
+}
+
+// Emit is the standard method called to automatically emit a token at the
+// outermost lexical rule. The token object should point into the
+// char buffer start..stop. If there is a text override in 'text',
+// use that to set the token's text. Override this method to emit
+// custom [Token] objects or provide a new factory.
+func (b *BaseLexer) Emit() Token {
+ t := b.factory.Create(b.tokenFactorySourcePair, b.thetype, b.text, b.channel, b.TokenStartCharIndex, b.GetCharIndex()-1, b.TokenStartLine, b.TokenStartColumn)
+ b.EmitToken(t)
+ return t
+}
+
+// EmitEOF emits an EOF token. By default, this is the last token emitted
+func (b *BaseLexer) EmitEOF() Token {
+ cpos := b.GetCharPositionInLine()
+ lpos := b.GetLine()
+ eof := b.factory.Create(b.tokenFactorySourcePair, TokenEOF, "", TokenDefaultChannel, b.input.Index(), b.input.Index()-1, lpos, cpos)
+ b.EmitToken(eof)
+ return eof
+}
+
+// GetCharPositionInLine returns the current position in the current line as far as the lexer is concerned.
+func (b *BaseLexer) GetCharPositionInLine() int {
+ return b.Interpreter.GetCharPositionInLine()
+}
+
+func (b *BaseLexer) GetLine() int {
+ return b.Interpreter.GetLine()
+}
+
+func (b *BaseLexer) GetType() int {
+ return b.thetype
+}
+
+func (b *BaseLexer) SetType(t int) {
+ b.thetype = t
+}
+
+// GetCharIndex returns the index of the current character of lookahead
+func (b *BaseLexer) GetCharIndex() int {
+ return b.input.Index()
+}
+
+// GetText returns the text Matched so far for the current token or any text override.
+func (b *BaseLexer) GetText() string {
+ if b.text != "" {
+ return b.text
+ }
+
+ return b.Interpreter.GetText(b.input)
+}
+
+// SetText sets the complete text of this token; it wipes any previous changes to the text.
+func (b *BaseLexer) SetText(text string) {
+ b.text = text
+}
+
+// GetATN returns the ATN used by the lexer.
+func (b *BaseLexer) GetATN() *ATN {
+ return b.Interpreter.ATN()
+}
+
+// GetAllTokens returns a list of all [Token] objects in input char stream.
+// Forces a load of all tokens that can be made from the input char stream.
+//
+// Does not include EOF token.
+func (b *BaseLexer) GetAllTokens() []Token {
+ vl := b.Virt
+ tokens := make([]Token, 0)
+ t := vl.NextToken()
+ for t.GetTokenType() != TokenEOF {
+ tokens = append(tokens, t)
+ t = vl.NextToken()
+ }
+ return tokens
+}
+
+func (b *BaseLexer) notifyListeners(e RecognitionException) {
+ start := b.TokenStartCharIndex
+ stop := b.input.Index()
+ text := b.input.GetTextFromInterval(NewInterval(start, stop))
+ msg := "token recognition error at: '" + text + "'"
+ listener := b.GetErrorListenerDispatch()
+ listener.SyntaxError(b, nil, b.TokenStartLine, b.TokenStartColumn, msg, e)
+}
+
+func (b *BaseLexer) getErrorDisplayForChar(c rune) string {
+ if c == TokenEOF {
+ return ""
+ } else if c == '\n' {
+ return "\\n"
+ } else if c == '\t' {
+ return "\\t"
+ } else if c == '\r' {
+ return "\\r"
+ } else {
+ return string(c)
+ }
+}
+
+func (b *BaseLexer) getCharErrorDisplay(c rune) string {
+ return "'" + b.getErrorDisplayForChar(c) + "'"
+}
+
+// Recover handles a recognition error. A lexer can normally Match any char in its vocabulary after Matching
+// a token, so here we do the easy thing and just kill a character and hope
+// it all works out. You can instead use the rule invocation stack
+// to do sophisticated error recovery if you are in a fragment rule.
+//
+// In general, lexers should not need to recover and should have rules that cover any eventuality, such as
+// a character that makes no sense to the recognizer.
+func (b *BaseLexer) Recover(re RecognitionException) {
+ if b.input.LA(1) != TokenEOF {
+ if _, ok := re.(*LexerNoViableAltException); ok {
+ // Skip a char and try again
+ b.Interpreter.Consume(b.input)
+ } else {
+ // TODO: Do we lose character or line position information?
+ b.input.Consume()
+ }
+ }
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/lexer_action.go b/vendor/github.com/antlr4-go/antlr/v4/lexer_action.go
new file mode 100644
index 000000000..eaa7393e0
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/lexer_action.go
@@ -0,0 +1,452 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import "strconv"
+
+const (
+ // LexerActionTypeChannel represents a [LexerChannelAction] action.
+ LexerActionTypeChannel = 0
+
+ // LexerActionTypeCustom represents a [LexerCustomAction] action.
+ LexerActionTypeCustom = 1
+
+ // LexerActionTypeMode represents a [LexerModeAction] action.
+ LexerActionTypeMode = 2
+
+ // LexerActionTypeMore represents a [LexerMoreAction] action.
+ LexerActionTypeMore = 3
+
+ // LexerActionTypePopMode represents a [LexerPopModeAction] action.
+ LexerActionTypePopMode = 4
+
+ // LexerActionTypePushMode represents a [LexerPushModeAction] action.
+ LexerActionTypePushMode = 5
+
+ // LexerActionTypeSkip represents a [LexerSkipAction] action.
+ LexerActionTypeSkip = 6
+
+ // LexerActionTypeType represents a [LexerTypeAction] action.
+ LexerActionTypeType = 7
+)
+
+type LexerAction interface {
+ getActionType() int
+ getIsPositionDependent() bool
+ execute(lexer Lexer)
+ Hash() int
+ Equals(other LexerAction) bool
+}
+
+type BaseLexerAction struct {
+ actionType int
+ isPositionDependent bool
+}
+
+func NewBaseLexerAction(action int) *BaseLexerAction {
+ la := new(BaseLexerAction)
+
+ la.actionType = action
+ la.isPositionDependent = false
+
+ return la
+}
+
+func (b *BaseLexerAction) execute(_ Lexer) {
+ panic("Not implemented")
+}
+
+func (b *BaseLexerAction) getActionType() int {
+ return b.actionType
+}
+
+func (b *BaseLexerAction) getIsPositionDependent() bool {
+ return b.isPositionDependent
+}
+
+func (b *BaseLexerAction) Hash() int {
+ h := murmurInit(0)
+ h = murmurUpdate(h, b.actionType)
+ return murmurFinish(h, 1)
+}
+
+func (b *BaseLexerAction) Equals(other LexerAction) bool {
+ return b.actionType == other.getActionType()
+}
+
+// LexerSkipAction implements the [BaseLexerAction.Skip] lexer action by calling [Lexer.Skip].
+//
+// The Skip command does not have any parameters, so this action is
+// implemented as a singleton instance exposed by the [LexerSkipActionINSTANCE].
+type LexerSkipAction struct {
+ *BaseLexerAction
+}
+
+func NewLexerSkipAction() *LexerSkipAction {
+ la := new(LexerSkipAction)
+ la.BaseLexerAction = NewBaseLexerAction(LexerActionTypeSkip)
+ return la
+}
+
+// LexerSkipActionINSTANCE provides a singleton instance of this parameterless lexer action.
+var LexerSkipActionINSTANCE = NewLexerSkipAction()
+
+func (l *LexerSkipAction) execute(lexer Lexer) {
+ lexer.Skip()
+}
+
+// String returns a string representation of the current [LexerSkipAction].
+func (l *LexerSkipAction) String() string {
+ return "skip"
+}
+
+func (b *LexerSkipAction) Equals(other LexerAction) bool {
+ return other.getActionType() == LexerActionTypeSkip
+}
+
+// LexerTypeAction implements the {@code type} lexer action by calling {@link Lexer//setType}
+// with the assigned type.
+type LexerTypeAction struct {
+ *BaseLexerAction
+
+ thetype int
+}
+
+func NewLexerTypeAction(thetype int) *LexerTypeAction {
+ l := new(LexerTypeAction)
+ l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeType)
+ l.thetype = thetype
+ return l
+}
+
+func (l *LexerTypeAction) execute(lexer Lexer) {
+ lexer.SetType(l.thetype)
+}
+
+func (l *LexerTypeAction) Hash() int {
+ h := murmurInit(0)
+ h = murmurUpdate(h, l.actionType)
+ h = murmurUpdate(h, l.thetype)
+ return murmurFinish(h, 2)
+}
+
+func (l *LexerTypeAction) Equals(other LexerAction) bool {
+ if l == other {
+ return true
+ } else if _, ok := other.(*LexerTypeAction); !ok {
+ return false
+ } else {
+ return l.thetype == other.(*LexerTypeAction).thetype
+ }
+}
+
+func (l *LexerTypeAction) String() string {
+ return "actionType(" + strconv.Itoa(l.thetype) + ")"
+}
+
+// LexerPushModeAction implements the pushMode lexer action by calling
+// [Lexer.pushMode] with the assigned mode.
+type LexerPushModeAction struct {
+ *BaseLexerAction
+ mode int
+}
+
+func NewLexerPushModeAction(mode int) *LexerPushModeAction {
+
+ l := new(LexerPushModeAction)
+ l.BaseLexerAction = NewBaseLexerAction(LexerActionTypePushMode)
+
+ l.mode = mode
+ return l
+}
+
+//
+// This action is implemented by calling {@link Lexer//pushMode} with the
+// value provided by {@link //getMode}.
+func (l *LexerPushModeAction) execute(lexer Lexer) {
+ lexer.PushMode(l.mode)
+}
+
+func (l *LexerPushModeAction) Hash() int {
+ h := murmurInit(0)
+ h = murmurUpdate(h, l.actionType)
+ h = murmurUpdate(h, l.mode)
+ return murmurFinish(h, 2)
+}
+
+func (l *LexerPushModeAction) Equals(other LexerAction) bool {
+ if l == other {
+ return true
+ } else if _, ok := other.(*LexerPushModeAction); !ok {
+ return false
+ } else {
+ return l.mode == other.(*LexerPushModeAction).mode
+ }
+}
+
+func (l *LexerPushModeAction) String() string {
+ return "pushMode(" + strconv.Itoa(l.mode) + ")"
+}
+
+// LexerPopModeAction implements the popMode lexer action by calling [Lexer.popMode].
+//
+// The popMode command does not have any parameters, so this action is
+// implemented as a singleton instance exposed by [LexerPopModeActionINSTANCE]
+type LexerPopModeAction struct {
+ *BaseLexerAction
+}
+
+func NewLexerPopModeAction() *LexerPopModeAction {
+
+ l := new(LexerPopModeAction)
+
+ l.BaseLexerAction = NewBaseLexerAction(LexerActionTypePopMode)
+
+ return l
+}
+
+var LexerPopModeActionINSTANCE = NewLexerPopModeAction()
+
+// This action is implemented by calling [Lexer.PopMode].
+func (l *LexerPopModeAction) execute(lexer Lexer) {
+ lexer.PopMode()
+}
+
+func (l *LexerPopModeAction) String() string {
+ return "popMode"
+}
+
+// LexerMoreAction implements the more lexer action by calling [Lexer.More].
+//
+// The more command does not have any parameters, so this action is
+// implemented as a singleton instance exposed by [LexerMoreActionINSTANCE].
+type LexerMoreAction struct {
+ *BaseLexerAction
+}
+
+func NewLexerMoreAction() *LexerMoreAction {
+ l := new(LexerMoreAction)
+ l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeMore)
+ return l
+}
+
+var LexerMoreActionINSTANCE = NewLexerMoreAction()
+
+// This action is implemented by calling [Lexer.More].
+func (l *LexerMoreAction) execute(lexer Lexer) {
+ lexer.More()
+}
+
+func (l *LexerMoreAction) String() string {
+ return "more"
+}
+
+// LexerModeAction implements the mode lexer action by calling [Lexer.mode] with
+// the assigned mode.
+type LexerModeAction struct {
+ *BaseLexerAction
+ mode int
+}
+
+func NewLexerModeAction(mode int) *LexerModeAction {
+ l := new(LexerModeAction)
+ l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeMode)
+ l.mode = mode
+ return l
+}
+
+//
+// This action is implemented by calling {@link Lexer//mode} with the
+// value provided by {@link //getMode}.
+func (l *LexerModeAction) execute(lexer Lexer) {
+ lexer.SetMode(l.mode)
+}
+
+func (l *LexerModeAction) Hash() int {
+ h := murmurInit(0)
+ h = murmurUpdate(h, l.actionType)
+ h = murmurUpdate(h, l.mode)
+ return murmurFinish(h, 2)
+}
+
+func (l *LexerModeAction) Equals(other LexerAction) bool {
+ if l == other {
+ return true
+ } else if _, ok := other.(*LexerModeAction); !ok {
+ return false
+ } else {
+ return l.mode == other.(*LexerModeAction).mode
+ }
+}
+
+func (l *LexerModeAction) String() string {
+ return "mode(" + strconv.Itoa(l.mode) + ")"
+}
+
+// Executes a custom lexer action by calling {@link Recognizer//action} with the
+// rule and action indexes assigned to the custom action. The implementation of
+// a custom action is added to the generated code for the lexer in an override
+// of {@link Recognizer//action} when the grammar is compiled.
+//
+//
+// This class may represent embedded actions created with the {...}
+// syntax in ANTLR 4, as well as actions created for lexer commands where the
+// command argument could not be evaluated when the grammar was compiled.
+
+// Constructs a custom lexer action with the specified rule and action
+// indexes.
+//
+// @param ruleIndex The rule index to use for calls to
+// {@link Recognizer//action}.
+// @param actionIndex The action index to use for calls to
+// {@link Recognizer//action}.
+
+type LexerCustomAction struct {
+ *BaseLexerAction
+ ruleIndex, actionIndex int
+}
+
+func NewLexerCustomAction(ruleIndex, actionIndex int) *LexerCustomAction {
+ l := new(LexerCustomAction)
+ l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeCustom)
+ l.ruleIndex = ruleIndex
+ l.actionIndex = actionIndex
+ l.isPositionDependent = true
+ return l
+}
+
+//
+// Custom actions are implemented by calling {@link Lexer//action} with the
+// appropriate rule and action indexes.
+func (l *LexerCustomAction) execute(lexer Lexer) {
+ lexer.Action(nil, l.ruleIndex, l.actionIndex)
+}
+
+func (l *LexerCustomAction) Hash() int {
+ h := murmurInit(0)
+ h = murmurUpdate(h, l.actionType)
+ h = murmurUpdate(h, l.ruleIndex)
+ h = murmurUpdate(h, l.actionIndex)
+ return murmurFinish(h, 3)
+}
+
+func (l *LexerCustomAction) Equals(other LexerAction) bool {
+ if l == other {
+ return true
+ } else if _, ok := other.(*LexerCustomAction); !ok {
+ return false
+ } else {
+ return l.ruleIndex == other.(*LexerCustomAction).ruleIndex &&
+ l.actionIndex == other.(*LexerCustomAction).actionIndex
+ }
+}
+
+// LexerChannelAction implements the channel lexer action by calling
+// [Lexer.setChannel] with the assigned channel.
+type LexerChannelAction struct {
+ *BaseLexerAction
+ channel int
+}
+
+// NewLexerChannelAction creates a channel lexer action by calling
+// [Lexer.setChannel] with the assigned channel.
+//
+// Constructs a new channel action with the specified channel value.
+func NewLexerChannelAction(channel int) *LexerChannelAction {
+ l := new(LexerChannelAction)
+ l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeChannel)
+ l.channel = channel
+ return l
+}
+
+//
+// This action is implemented by calling {@link Lexer//setChannel} with the
+// value provided by {@link //getChannel}.
+func (l *LexerChannelAction) execute(lexer Lexer) {
+ lexer.SetChannel(l.channel)
+}
+
+func (l *LexerChannelAction) Hash() int {
+ h := murmurInit(0)
+ h = murmurUpdate(h, l.actionType)
+ h = murmurUpdate(h, l.channel)
+ return murmurFinish(h, 2)
+}
+
+func (l *LexerChannelAction) Equals(other LexerAction) bool {
+ if l == other {
+ return true
+ } else if _, ok := other.(*LexerChannelAction); !ok {
+ return false
+ } else {
+ return l.channel == other.(*LexerChannelAction).channel
+ }
+}
+
+func (l *LexerChannelAction) String() string {
+ return "channel(" + strconv.Itoa(l.channel) + ")"
+}
+
+// This implementation of {@link LexerAction} is used for tracking input offsets
+// for position-dependent actions within a {@link LexerActionExecutor}.
+//
+//
+// This action is not serialized as part of the ATN, and is only required for
+// position-dependent lexer actions which appear at a location other than the
+// end of a rule. For more information about DFA optimizations employed for
+// lexer actions, see {@link LexerActionExecutor//append} and
+// {@link LexerActionExecutor//fixOffsetBeforeMatch}.
+
+type LexerIndexedCustomAction struct {
+ *BaseLexerAction
+ offset int
+ lexerAction LexerAction
+ isPositionDependent bool
+}
+
+// NewLexerIndexedCustomAction constructs a new indexed custom action by associating a character offset
+// with a [LexerAction].
+//
+// Note: This class is only required for lexer actions for which
+// [LexerAction.isPositionDependent] returns true.
+//
+// The offset points into the input [CharStream], relative to
+// the token start index, at which the specified lexerAction should be
+// executed.
+func NewLexerIndexedCustomAction(offset int, lexerAction LexerAction) *LexerIndexedCustomAction {
+
+ l := new(LexerIndexedCustomAction)
+ l.BaseLexerAction = NewBaseLexerAction(lexerAction.getActionType())
+
+ l.offset = offset
+ l.lexerAction = lexerAction
+ l.isPositionDependent = true
+
+ return l
+}
+
+//
+// This method calls {@link //execute} on the result of {@link //getAction}
+// using the provided {@code lexer}.
+func (l *LexerIndexedCustomAction) execute(lexer Lexer) {
+ // assume the input stream position was properly set by the calling code
+ l.lexerAction.execute(lexer)
+}
+
+func (l *LexerIndexedCustomAction) Hash() int {
+ h := murmurInit(0)
+ h = murmurUpdate(h, l.offset)
+ h = murmurUpdate(h, l.lexerAction.Hash())
+ return murmurFinish(h, 2)
+}
+
+func (l *LexerIndexedCustomAction) equals(other LexerAction) bool {
+ if l == other {
+ return true
+ } else if _, ok := other.(*LexerIndexedCustomAction); !ok {
+ return false
+ } else {
+ return l.offset == other.(*LexerIndexedCustomAction).offset &&
+ l.lexerAction.Equals(other.(*LexerIndexedCustomAction).lexerAction)
+ }
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/lexer_action_executor.go b/vendor/github.com/antlr4-go/antlr/v4/lexer_action_executor.go
new file mode 100644
index 000000000..dfc28c32b
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/lexer_action_executor.go
@@ -0,0 +1,173 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import "golang.org/x/exp/slices"
+
+// Represents an executor for a sequence of lexer actions which traversed during
+// the Matching operation of a lexer rule (token).
+//
+//
+// The executor tracks position information for position-dependent lexer actions
+// efficiently, ensuring that actions appearing only at the end of the rule do
+// not cause bloating of the {@link DFA} created for the lexer.
+
+type LexerActionExecutor struct {
+ lexerActions []LexerAction
+ cachedHash int
+}
+
+func NewLexerActionExecutor(lexerActions []LexerAction) *LexerActionExecutor {
+
+ if lexerActions == nil {
+ lexerActions = make([]LexerAction, 0)
+ }
+
+ l := new(LexerActionExecutor)
+
+ l.lexerActions = lexerActions
+
+ // Caches the result of {@link //hashCode} since the hash code is an element
+ // of the performance-critical {@link ATNConfig//hashCode} operation.
+ l.cachedHash = murmurInit(0)
+ for _, a := range lexerActions {
+ l.cachedHash = murmurUpdate(l.cachedHash, a.Hash())
+ }
+ l.cachedHash = murmurFinish(l.cachedHash, len(lexerActions))
+
+ return l
+}
+
+// LexerActionExecutorappend creates a [LexerActionExecutor] which executes the actions for
+// the input [LexerActionExecutor] followed by a specified
+// [LexerAction].
+// TODO: This does not match the Java code
+func LexerActionExecutorappend(lexerActionExecutor *LexerActionExecutor, lexerAction LexerAction) *LexerActionExecutor {
+ if lexerActionExecutor == nil {
+ return NewLexerActionExecutor([]LexerAction{lexerAction})
+ }
+
+ return NewLexerActionExecutor(append(lexerActionExecutor.lexerActions, lexerAction))
+}
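+
+// Accumulation sketch (illustrative): the simulator grows an executor one
+// action at a time while traversing action transitions:
+//
+//	var ex *LexerActionExecutor // nil until the first action is seen
+//	ex = LexerActionExecutorappend(ex, LexerSkipActionINSTANCE)
+//	ex = LexerActionExecutorappend(ex, NewLexerChannelAction(TokenHiddenChannel))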
+
+// fixOffsetBeforeMatch creates a [LexerActionExecutor] which encodes the current offset
+// for position-dependent lexer actions.
+//
+// Normally, when the executor encounters lexer actions where
+// [LexerAction.isPositionDependent] returns true, it calls
+// [IntStream.Seek] on the input [CharStream] to set the input
+// position to the end of the current token. This behavior provides
+// for efficient [DFA] representation of lexer actions which appear at the end
+// of a lexer rule, even when the lexer rule Matches a variable number of
+// characters.
+//
+// Prior to traversing a Match transition in the [ATN], the current offset
+// from the token start index is assigned to all position-dependent lexer
+// actions which have not already been assigned a fixed offset. By storing
+// the offsets relative to the token start index, the [DFA] representation of
+// lexer actions which appear in the middle of tokens remains efficient due
+// to sharing among tokens of the same Length, regardless of their absolute
+// position in the input stream.
+//
+// If the current executor already has offsets assigned to all
+// position-dependent lexer actions, the method returns this instance.
+//
+// The offset is assigned to all position-dependent
+// lexer actions which do not already have offsets assigned.
+//
+// The func returns a [LexerActionExecutor] that stores input stream offsets
+// for all position-dependent lexer actions.
+func (l *LexerActionExecutor) fixOffsetBeforeMatch(offset int) *LexerActionExecutor {
+ var updatedLexerActions []LexerAction
+ for i := 0; i < len(l.lexerActions); i++ {
+ _, ok := l.lexerActions[i].(*LexerIndexedCustomAction)
+ if l.lexerActions[i].getIsPositionDependent() && !ok {
+ if updatedLexerActions == nil {
+ updatedLexerActions = make([]LexerAction, 0, len(l.lexerActions))
+ updatedLexerActions = append(updatedLexerActions, l.lexerActions...)
+ }
+ updatedLexerActions[i] = NewLexerIndexedCustomAction(offset, l.lexerActions[i])
+ }
+ }
+ if updatedLexerActions == nil {
+ return l
+ }
+
+ return NewLexerActionExecutor(updatedLexerActions)
+}
+
+// Execute the actions encapsulated by this executor within the context of a
+// particular {@link Lexer}.
+//
+//
+// This method calls {@link IntStream//seek} to set the position of the
+// {@code input} {@link CharStream} prior to calling
+// {@link LexerAction//execute} on a position-dependent action. Before the
+// method returns, the input position will be restored to the same position
+// it was in when the method was invoked.
+//
+// @param lexer The lexer instance.
+// @param input The input stream which is the source for the current token.
+// When this method is called, the current {@link IntStream//index} for
+// {@code input} should be the start of the following token, i.e. 1
+// character past the end of the current token.
+// @param startIndex The token start index. This value may be passed to
+// {@link IntStream//seek} to set the {@code input} position to the beginning
+// of the token.
+func (l *LexerActionExecutor) execute(lexer Lexer, input CharStream, startIndex int) {
+ requiresSeek := false
+ stopIndex := input.Index()
+
+ defer func() {
+ if requiresSeek {
+ input.Seek(stopIndex)
+ }
+ }()
+
+ for i := 0; i < len(l.lexerActions); i++ {
+ lexerAction := l.lexerActions[i]
+ if la, ok := lexerAction.(*LexerIndexedCustomAction); ok {
+ offset := la.offset
+ input.Seek(startIndex + offset)
+ lexerAction = la.lexerAction
+ requiresSeek = (startIndex + offset) != stopIndex
+ } else if lexerAction.getIsPositionDependent() {
+ input.Seek(stopIndex)
+ requiresSeek = false
+ }
+ lexerAction.execute(lexer)
+ }
+}
+
+func (l *LexerActionExecutor) Hash() int {
+ if l == nil {
+ // TODO: Why is this here? l should not be nil
+ return 61
+ }
+
+ // TODO: This is created from the action itself when the struct is created - will this be an issue at some point? Java uses the runtime assign hashcode
+ return l.cachedHash
+}
+
+func (l *LexerActionExecutor) Equals(other interface{}) bool {
+ if l == other {
+ return true
+ }
+ othert, ok := other.(*LexerActionExecutor)
+ if !ok {
+ return false
+ }
+ if othert == nil {
+ return false
+ }
+ if l.cachedHash != othert.cachedHash {
+ return false
+ }
+ if len(l.lexerActions) != len(othert.lexerActions) {
+ return false
+ }
+ return slices.EqualFunc(l.lexerActions, othert.lexerActions, func(i, j LexerAction) bool {
+ return i.Equals(j)
+ })
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/lexer_atn_simulator.go b/vendor/github.com/antlr4-go/antlr/v4/lexer_atn_simulator.go
new file mode 100644
index 000000000..fe938b025
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/lexer_atn_simulator.go
@@ -0,0 +1,677 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+//goland:noinspection GoUnusedGlobalVariable
+var (
+ LexerATNSimulatorMinDFAEdge = 0
+ LexerATNSimulatorMaxDFAEdge = 127 // forces unicode to stay in ATN
+
+ LexerATNSimulatorMatchCalls = 0
+)
+
+type ILexerATNSimulator interface {
+ IATNSimulator
+
+ reset()
+ Match(input CharStream, mode int) int
+ GetCharPositionInLine() int
+ GetLine() int
+ GetText(input CharStream) string
+ Consume(input CharStream)
+}
+
+type LexerATNSimulator struct {
+ BaseATNSimulator
+
+ recog Lexer
+ predictionMode int
+ mergeCache *JPCMap2
+ startIndex int
+ Line int
+ CharPositionInLine int
+ mode int
+ prevAccept *SimState
+ MatchCalls int
+}
+
+func NewLexerATNSimulator(recog Lexer, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *LexerATNSimulator {
+ l := &LexerATNSimulator{
+ BaseATNSimulator: BaseATNSimulator{
+ atn: atn,
+ sharedContextCache: sharedContextCache,
+ },
+ }
+
+ l.decisionToDFA = decisionToDFA
+ l.recog = recog
+
+ // The current token's starting index into the character stream.
+ // Shared across DFA to ATN simulation in case the ATN fails and the
+ // DFA did not have a previous accept state. In that case, we use the
+ // ATN-generated exception object.
+ l.startIndex = -1
+
+ // line number 1..n within the input
+ l.Line = 1
+
+ // The index of the character relative to the beginning of the line
+ // 0..n-1
+ l.CharPositionInLine = 0
+
+ l.mode = LexerDefaultMode
+
+ // Used during DFA/ATN exec to record the most recent accept configuration
+ // info
+ l.prevAccept = NewSimState()
+
+ return l
+}
+
+func (l *LexerATNSimulator) copyState(simulator *LexerATNSimulator) {
+ l.CharPositionInLine = simulator.CharPositionInLine
+ l.Line = simulator.Line
+ l.mode = simulator.mode
+ l.startIndex = simulator.startIndex
+}
+
+func (l *LexerATNSimulator) Match(input CharStream, mode int) int {
+ l.MatchCalls++
+ l.mode = mode
+ mark := input.Mark()
+
+ defer func() {
+ input.Release(mark)
+ }()
+
+ l.startIndex = input.Index()
+ l.prevAccept.reset()
+
+ dfa := l.decisionToDFA[mode]
+
+ var s0 *DFAState
+ l.atn.stateMu.RLock()
+ s0 = dfa.getS0()
+ l.atn.stateMu.RUnlock()
+
+ if s0 == nil {
+ return l.MatchATN(input)
+ }
+
+ return l.execATN(input, s0)
+}
+
+func (l *LexerATNSimulator) reset() {
+ l.prevAccept.reset()
+ l.startIndex = -1
+ l.Line = 1
+ l.CharPositionInLine = 0
+ l.mode = LexerDefaultMode
+}
+
+func (l *LexerATNSimulator) MatchATN(input CharStream) int {
+ startState := l.atn.modeToStartState[l.mode]
+
+ if runtimeConfig.lexerATNSimulatorDebug {
+ fmt.Println("MatchATN mode " + strconv.Itoa(l.mode) + " start: " + startState.String())
+ }
+ oldMode := l.mode
+ s0Closure := l.computeStartState(input, startState)
+ suppressEdge := s0Closure.hasSemanticContext
+ s0Closure.hasSemanticContext = false
+
+ next := l.addDFAState(s0Closure, suppressEdge)
+
+ predict := l.execATN(input, next)
+
+ if runtimeConfig.lexerATNSimulatorDebug {
+ fmt.Println("DFA after MatchATN: " + l.decisionToDFA[oldMode].ToLexerString())
+ }
+ return predict
+}
+
+func (l *LexerATNSimulator) execATN(input CharStream, ds0 *DFAState) int {
+
+ if runtimeConfig.lexerATNSimulatorDebug {
+ fmt.Println("start state closure=" + ds0.configs.String())
+ }
+ if ds0.isAcceptState {
+ // allow zero-Length tokens
+ l.captureSimState(l.prevAccept, input, ds0)
+ }
+ t := input.LA(1)
+ s := ds0 // s is current/from DFA state
+
+ for { // while more work
+ if runtimeConfig.lexerATNSimulatorDebug {
+ fmt.Println("execATN loop starting closure: " + s.configs.String())
+ }
+
+ // As we move src->trg, src->trg, we keep track of the previous trg to
+ // avoid looking up the DFA state again, which is expensive.
+ // If the previous target was already part of the DFA, we might
+ // be able to avoid doing a reach operation upon t. If s!=nil,
+ // it means that semantic predicates didn't prevent us from
+ // creating a DFA state. Once we know s!=nil, we check to see if
+ // the DFA state has an edge already for t. If so, we can just reuse
+ // its configuration set; there's no point in re-computing it.
+ // This is kind of like doing DFA simulation within the ATN
+ // simulation because DFA simulation is really just a way to avoid
+ // computing reach/closure sets. Technically, once we know that
+ // we have a previously added DFA state, we could jump over to
+ // the DFA simulator. But, that would mean popping back and forth
+ // a lot and making things more complicated algorithmically.
+ // This optimization makes a lot of sense for loops within DFA.
+ // A character will take us back to an existing DFA state
+ // that already has lots of edges out of it. e.g., .* in comments.
+ target := l.getExistingTargetState(s, t)
+ if target == nil {
+ target = l.computeTargetState(input, s, t)
+ // print("Computed:" + str(target))
+ }
+ if target == ATNSimulatorError {
+ break
+ }
+ // If this is a consumable input element, make sure to consume before
+ // capturing the accept state so the input index, line, and char
+ // position accurately reflect the state of the interpreter at the
+ // end of the token.
+ if t != TokenEOF {
+ l.Consume(input)
+ }
+ if target.isAcceptState {
+ l.captureSimState(l.prevAccept, input, target)
+ if t == TokenEOF {
+ break
+ }
+ }
+ t = input.LA(1)
+ s = target // flip current DFA target becomes new src/from state
+ }
+
+ return l.failOrAccept(l.prevAccept, input, s.configs, t)
+}
+
+// Get an existing target state for an edge in the DFA. If the target state
+// for the edge has not yet been computed or is otherwise not available,
+// this method returns {@code nil}.
+//
+// @param s The current DFA state
+// @param t The next input symbol
+// @return The existing target DFA state for the given input symbol
+// {@code t}, or {@code nil} if the target state for this edge is not
+// already cached
+func (l *LexerATNSimulator) getExistingTargetState(s *DFAState, t int) *DFAState {
+ if t < LexerATNSimulatorMinDFAEdge || t > LexerATNSimulatorMaxDFAEdge {
+ return nil
+ }
+
+ l.atn.edgeMu.RLock()
+ defer l.atn.edgeMu.RUnlock()
+ if s.getEdges() == nil {
+ return nil
+ }
+ target := s.getIthEdge(t - LexerATNSimulatorMinDFAEdge)
+ if runtimeConfig.lexerATNSimulatorDebug && target != nil {
+ fmt.Println("reuse state " + strconv.Itoa(s.stateNumber) + " edge to " + strconv.Itoa(target.stateNumber))
+ }
+ return target
+}
+
+// computeTargetState computes a target state for an edge in the [DFA], and attempts to add the
+// computed state and corresponding edge to the [DFA].
+//
+// The func returns the computed target [DFA] state for the given input symbol t.
+// If this does not lead to a valid [DFA] state, this method
+// returns ATNSimulatorError.
+func (l *LexerATNSimulator) computeTargetState(input CharStream, s *DFAState, t int) *DFAState {
+ reach := NewOrderedATNConfigSet()
+
+ // if we don't find an existing DFA state
+ // Fill reach starting from closure, following t transitions
+ l.getReachableConfigSet(input, s.configs, reach, t)
+
+ if len(reach.configs) == 0 { // we got nowhere on t from s
+ if !reach.hasSemanticContext {
+ // we got nowhere on t, but don't throw out this knowledge; it'd
+ // cause a fail-over from the DFA later.
+ l.addDFAEdge(s, t, ATNSimulatorError, nil)
+ }
+ // stop when we can't Match any more char
+ return ATNSimulatorError
+ }
+ // Add an edge from s to target DFA found/created for reach
+ return l.addDFAEdge(s, t, nil, reach)
+}
+
+func (l *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream, reach *ATNConfigSet, t int) int {
+ if l.prevAccept.dfaState != nil {
+ lexerActionExecutor := prevAccept.dfaState.lexerActionExecutor
+ l.accept(input, lexerActionExecutor, l.startIndex, prevAccept.index, prevAccept.line, prevAccept.column)
+ return prevAccept.dfaState.prediction
+ }
+
+ // if no accept and EOF is first char, return EOF
+ if t == TokenEOF && input.Index() == l.startIndex {
+ return TokenEOF
+ }
+
+ panic(NewLexerNoViableAltException(l.recog, input, l.startIndex, reach))
+}
+
+// getReachableConfigSet when given a starting configuration set, figures out all [ATN] configurations
+// we can reach upon input t.
+//
+// Parameter reach is a return parameter.
+func (l *LexerATNSimulator) getReachableConfigSet(input CharStream, closure *ATNConfigSet, reach *ATNConfigSet, t int) {
+ // This is used to Skip processing for configs which have a lower priority
+ // than a config that already reached an accept state for the same rule
+ SkipAlt := ATNInvalidAltNumber
+
+ for _, cfg := range closure.configs {
+ currentAltReachedAcceptState := cfg.GetAlt() == SkipAlt
+ if currentAltReachedAcceptState && cfg.passedThroughNonGreedyDecision {
+ continue
+ }
+
+ if runtimeConfig.lexerATNSimulatorDebug {
+
+ fmt.Printf("testing %s at %s\n", l.GetTokenName(t), cfg.String())
+ }
+
+ for _, trans := range cfg.GetState().GetTransitions() {
+ target := l.getReachableTarget(trans, t)
+ if target != nil {
+ lexerActionExecutor := cfg.lexerActionExecutor
+ if lexerActionExecutor != nil {
+ lexerActionExecutor = lexerActionExecutor.fixOffsetBeforeMatch(input.Index() - l.startIndex)
+ }
+ treatEOFAsEpsilon := t == TokenEOF
+ config := NewLexerATNConfig3(cfg, target, lexerActionExecutor)
+ if l.closure(input, config, reach,
+ currentAltReachedAcceptState, true, treatEOFAsEpsilon) {
+ // any remaining configs for this alt have a lower priority
+ // than the one that just reached an accept state.
+ SkipAlt = cfg.GetAlt()
+ }
+ }
+ }
+ }
+}
+
+func (l *LexerATNSimulator) accept(input CharStream, lexerActionExecutor *LexerActionExecutor, startIndex, index, line, charPos int) {
+ if runtimeConfig.lexerATNSimulatorDebug {
+ fmt.Printf("ACTION %v\n", lexerActionExecutor)
+ }
+ // seek to after last char in token
+ input.Seek(index)
+ l.Line = line
+ l.CharPositionInLine = charPos
+ if lexerActionExecutor != nil && l.recog != nil {
+ lexerActionExecutor.execute(l.recog, input, startIndex)
+ }
+}
+
+func (l *LexerATNSimulator) getReachableTarget(trans Transition, t int) ATNState {
+ if trans.Matches(t, 0, LexerMaxCharValue) {
+ return trans.getTarget()
+ }
+
+ return nil
+}
+
+func (l *LexerATNSimulator) computeStartState(input CharStream, p ATNState) *ATNConfigSet {
+ configs := NewOrderedATNConfigSet()
+ for i := 0; i < len(p.GetTransitions()); i++ {
+ target := p.GetTransitions()[i].getTarget()
+ cfg := NewLexerATNConfig6(target, i+1, BasePredictionContextEMPTY)
+ l.closure(input, cfg, configs, false, false, false)
+ }
+
+ return configs
+}
+
+// closure since the alternatives within any lexer decision are ordered by
+// preference, this method stops pursuing the closure as soon as an accept
+// state is reached. After the first accept state is reached by depth-first
+// search from config, all other (potentially reachable) states for
+// this rule would have a lower priority.
+//
+// The func returns true if an accept state is reached.
+func (l *LexerATNSimulator) closure(input CharStream, config *ATNConfig, configs *ATNConfigSet,
+ currentAltReachedAcceptState, speculative, treatEOFAsEpsilon bool) bool {
+
+ if runtimeConfig.lexerATNSimulatorDebug {
+ fmt.Println("closure(" + config.String() + ")")
+ }
+
+ _, ok := config.state.(*RuleStopState)
+ if ok {
+
+ if runtimeConfig.lexerATNSimulatorDebug {
+ if l.recog != nil {
+ fmt.Printf("closure at %s rule stop %s\n", l.recog.GetRuleNames()[config.state.GetRuleIndex()], config)
+ } else {
+ fmt.Printf("closure at rule stop %s\n", config)
+ }
+ }
+
+ if config.context == nil || config.context.hasEmptyPath() {
+ if config.context == nil || config.context.isEmpty() {
+ configs.Add(config, nil)
+ return true
+ }
+
+ configs.Add(NewLexerATNConfig2(config, config.state, BasePredictionContextEMPTY), nil)
+ currentAltReachedAcceptState = true
+ }
+ if config.context != nil && !config.context.isEmpty() {
+ for i := 0; i < config.context.length(); i++ {
+ if config.context.getReturnState(i) != BasePredictionContextEmptyReturnState {
+ newContext := config.context.GetParent(i) // "pop" return state
+ returnState := l.atn.states[config.context.getReturnState(i)]
+ cfg := NewLexerATNConfig2(config, returnState, newContext)
+ currentAltReachedAcceptState = l.closure(input, cfg, configs, currentAltReachedAcceptState, speculative, treatEOFAsEpsilon)
+ }
+ }
+ }
+ return currentAltReachedAcceptState
+ }
+ // optimization
+ if !config.state.GetEpsilonOnlyTransitions() {
+ if !currentAltReachedAcceptState || !config.passedThroughNonGreedyDecision {
+ configs.Add(config, nil)
+ }
+ }
+ for j := 0; j < len(config.state.GetTransitions()); j++ {
+ trans := config.state.GetTransitions()[j]
+ cfg := l.getEpsilonTarget(input, config, trans, configs, speculative, treatEOFAsEpsilon)
+ if cfg != nil {
+ currentAltReachedAcceptState = l.closure(input, cfg, configs,
+ currentAltReachedAcceptState, speculative, treatEOFAsEpsilon)
+ }
+ }
+ return currentAltReachedAcceptState
+}
+
+// side-effect: can alter configs.hasSemanticContext
+func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *ATNConfig, trans Transition,
+ configs *ATNConfigSet, speculative, treatEOFAsEpsilon bool) *ATNConfig {
+
+ var cfg *ATNConfig
+
+ if trans.getSerializationType() == TransitionRULE {
+
+ rt := trans.(*RuleTransition)
+ newContext := SingletonBasePredictionContextCreate(config.context, rt.followState.GetStateNumber())
+ cfg = NewLexerATNConfig2(config, trans.getTarget(), newContext)
+
+ } else if trans.getSerializationType() == TransitionPRECEDENCE {
+ panic("Precedence predicates are not supported in lexers.")
+ } else if trans.getSerializationType() == TransitionPREDICATE {
+ // Track traversing semantic predicates. If we traverse,
+ // we cannot add a DFA state for this "reach" computation
+ // because the DFA would not test the predicate again in the
+ // future. Rather than creating collections of semantic predicates
+ // like v3 and testing them on prediction, v4 will test them on the
+ // fly all the time using the ATN not the DFA. This is slower but
+ // semantically it's not used that often. One of the key elements to
+ // this predicate mechanism is not adding DFA states that see
+ // predicates immediately afterwards in the ATN. For example,
+
+ // a : ID {p1}? | ID {p2}?
+
+ // should create the start state for rule 'a' (to save start state
+ // competition), but should not create target of ID state. The
+ // collection of ATN states the following ID references includes
+ // states reached by traversing predicates. Since this is when we
+ // test them, we cannot cache the DFA state target of ID.
+
+ pt := trans.(*PredicateTransition)
+
+ if runtimeConfig.lexerATNSimulatorDebug {
+ fmt.Println("EVAL rule " + strconv.Itoa(trans.(*PredicateTransition).ruleIndex) + ":" + strconv.Itoa(pt.predIndex))
+ }
+ configs.hasSemanticContext = true
+ if l.evaluatePredicate(input, pt.ruleIndex, pt.predIndex, speculative) {
+ cfg = NewLexerATNConfig4(config, trans.getTarget())
+ }
+ } else if trans.getSerializationType() == TransitionACTION {
+ if config.context == nil || config.context.hasEmptyPath() {
+ // execute actions anywhere in the start rule for a token.
+ //
+ // TODO: if the entry rule is invoked recursively, some
+ // actions may be executed during the recursive call. The
+ // problem can appear when hasEmptyPath() is true but
+ // isEmpty() is false. In this case, the config needs to be
+ // split into two contexts - one with just the empty path
+ // and another with everything but the empty path.
+ // Unfortunately, the current algorithm does not allow
+ // getEpsilonTarget to return two configurations, so
+ // additional modifications are needed before we can support
+ // the split operation.
+ lexerActionExecutor := LexerActionExecutorappend(config.lexerActionExecutor, l.atn.lexerActions[trans.(*ActionTransition).actionIndex])
+ cfg = NewLexerATNConfig3(config, trans.getTarget(), lexerActionExecutor)
+ } else {
+ // ignore actions in referenced rules
+ cfg = NewLexerATNConfig4(config, trans.getTarget())
+ }
+ } else if trans.getSerializationType() == TransitionEPSILON {
+ cfg = NewLexerATNConfig4(config, trans.getTarget())
+ } else if trans.getSerializationType() == TransitionATOM ||
+ trans.getSerializationType() == TransitionRANGE ||
+ trans.getSerializationType() == TransitionSET {
+ if treatEOFAsEpsilon {
+ if trans.Matches(TokenEOF, 0, LexerMaxCharValue) {
+ cfg = NewLexerATNConfig4(config, trans.getTarget())
+ }
+ }
+ }
+ return cfg
+}
+
+// evaluatePredicate evaluates a predicate specified in the lexer.
+//
+// If speculative is true, this method was called before
+// [consume] for the Matched character. This method should call
+// [consume] before evaluating the predicate to ensure position
+// sensitive values, including [GetText], [GetLine],
+// and [GetColumn], properly reflect the current
+// lexer state. This method should restore input and the simulator
+// to the original state before returning, i.e. undo the actions made by the
+// call to [Consume].
+//
+// The func returns true if the specified predicate evaluates to true.
+func (l *LexerATNSimulator) evaluatePredicate(input CharStream, ruleIndex, predIndex int, speculative bool) bool {
+ // assume true if no recognizer was provided
+ if l.recog == nil {
+ return true
+ }
+ if !speculative {
+ return l.recog.Sempred(nil, ruleIndex, predIndex)
+ }
+ savedcolumn := l.CharPositionInLine
+ savedLine := l.Line
+ index := input.Index()
+ marker := input.Mark()
+
+ defer func() {
+ l.CharPositionInLine = savedcolumn
+ l.Line = savedLine
+ input.Seek(index)
+ input.Release(marker)
+ }()
+
+ l.Consume(input)
+ return l.recog.Sempred(nil, ruleIndex, predIndex)
+}
+
+func (l *LexerATNSimulator) captureSimState(settings *SimState, input CharStream, dfaState *DFAState) {
+ settings.index = input.Index()
+ settings.line = l.Line
+ settings.column = l.CharPositionInLine
+ settings.dfaState = dfaState
+}
+
+func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfgs *ATNConfigSet) *DFAState {
+ if to == nil && cfgs != nil {
+ // leading to this call, ATNConfigSet.hasSemanticContext is used as a
+ // marker indicating dynamic predicate evaluation makes this edge
+ // dependent on the specific input sequence, so the static edge in the
+ // DFA should be omitted. The target DFAState is still created since
+ // execATN has the ability to resynchronize with the DFA state cache
+ // following the predicate evaluation step.
+ //
+ // TJP notes: next time through the DFA, we see a pred again and eval.
+ // If that gets us to a previously created (but dangling) DFA
+ // state, we can continue in pure DFA mode from there.
+ //
+ suppressEdge := cfgs.hasSemanticContext
+ cfgs.hasSemanticContext = false
+ to = l.addDFAState(cfgs, true)
+
+ if suppressEdge {
+ return to
+ }
+ }
+ // add the edge
+ if tk < LexerATNSimulatorMinDFAEdge || tk > LexerATNSimulatorMaxDFAEdge {
+ // Only track edges within the DFA bounds
+ return to
+ }
+ if runtimeConfig.lexerATNSimulatorDebug {
+ fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + strconv.Itoa(tk))
+ }
+ l.atn.edgeMu.Lock()
+ defer l.atn.edgeMu.Unlock()
+ if from.getEdges() == nil {
+ // make room for tokens 1..n and -1 masquerading as index 0
+ from.setEdges(make([]*DFAState, LexerATNSimulatorMaxDFAEdge-LexerATNSimulatorMinDFAEdge+1))
+ }
+ from.setIthEdge(tk-LexerATNSimulatorMinDFAEdge, to) // connect
+
+ return to
+}
+
+// Add a new DFA state if there isn't one with this set of
+// configurations already. This method also detects the first
+// configuration containing an ATN rule stop state. Later, when
+// traversing the DFA, we will know which rule to accept.
+func (l *LexerATNSimulator) addDFAState(configs *ATNConfigSet, suppressEdge bool) *DFAState {
+
+ proposed := NewDFAState(-1, configs)
+ var firstConfigWithRuleStopState *ATNConfig
+
+ for _, cfg := range configs.configs {
+ _, ok := cfg.GetState().(*RuleStopState)
+
+ if ok {
+ firstConfigWithRuleStopState = cfg
+ break
+ }
+ }
+ if firstConfigWithRuleStopState != nil {
+ proposed.isAcceptState = true
+ proposed.lexerActionExecutor = firstConfigWithRuleStopState.lexerActionExecutor
+ proposed.setPrediction(l.atn.ruleToTokenType[firstConfigWithRuleStopState.GetState().GetRuleIndex()])
+ }
+ dfa := l.decisionToDFA[l.mode]
+
+ l.atn.stateMu.Lock()
+ defer l.atn.stateMu.Unlock()
+ existing, present := dfa.Get(proposed)
+ if present {
+
+ // This state was already present, so just return it.
+ //
+ proposed = existing
+ } else {
+
+ // We need to add the new state
+ //
+ proposed.stateNumber = dfa.Len()
+ configs.readOnly = true
+ configs.configLookup = nil // Not needed now
+ proposed.configs = configs
+ dfa.Put(proposed)
+ }
+ if !suppressEdge {
+ dfa.setS0(proposed)
+ }
+ return proposed
+}
+
+func (l *LexerATNSimulator) getDFA(mode int) *DFA {
+ return l.decisionToDFA[mode]
+}
+
+// GetText returns the text [Match]ed so far for the current token.
+func (l *LexerATNSimulator) GetText(input CharStream) string {
+ // index is first lookahead char, don't include.
+ return input.GetTextFromInterval(NewInterval(l.startIndex, input.Index()-1))
+}
+
+func (l *LexerATNSimulator) Consume(input CharStream) {
+ curChar := input.LA(1)
+ if curChar == int('\n') {
+ l.Line++
+ l.CharPositionInLine = 0
+ } else {
+ l.CharPositionInLine++
+ }
+ input.Consume()
+}
+
+func (l *LexerATNSimulator) GetCharPositionInLine() int {
+ return l.CharPositionInLine
+}
+
+func (l *LexerATNSimulator) GetLine() int {
+ return l.Line
+}
+
+func (l *LexerATNSimulator) GetTokenName(tt int) string {
+ if tt == -1 {
+ return "EOF"
+ }
+
+ var sb strings.Builder
+ sb.Grow(6)
+ sb.WriteByte('\'')
+ sb.WriteRune(rune(tt))
+ sb.WriteByte('\'')
+
+ return sb.String()
+}
+
+func resetSimState(sim *SimState) {
+ sim.index = -1
+ sim.line = 0
+ sim.column = -1
+ sim.dfaState = nil
+}
+
+type SimState struct {
+ index int
+ line int
+ column int
+ dfaState *DFAState
+}
+
+func NewSimState() *SimState {
+ s := new(SimState)
+ resetSimState(s)
+ return s
+}
+
+func (s *SimState) reset() {
+ resetSimState(s)
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/ll1_analyzer.go b/vendor/github.com/antlr4-go/antlr/v4/ll1_analyzer.go
new file mode 100644
index 000000000..4955ac876
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/ll1_analyzer.go
@@ -0,0 +1,218 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+type LL1Analyzer struct {
+ atn *ATN
+}
+
+func NewLL1Analyzer(atn *ATN) *LL1Analyzer {
+ la := new(LL1Analyzer)
+ la.atn = atn
+ return la
+}
+
+const (
+ // LL1AnalyzerHitPred is a special value added to the lookahead sets to indicate that we hit
+ // a predicate during analysis if
+ //
+ // seeThruPreds==false
+ LL1AnalyzerHitPred = TokenInvalidType
+)
+
+// getDecisionLookahead calculates the SLL(1) expected lookahead set for each outgoing transition
+// of an {@link ATNState}. The returned array has one element for each
+// outgoing transition in {@code s}. If the closure from transition
+// i leads to a semantic predicate before Matching a symbol, the
+// element at index i of the result will be {@code nil}.
+//
+// @param s the ATN state
+// @return the expected symbols for each outgoing transition of {@code s}.
+func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet {
+ if s == nil {
+ return nil
+ }
+ count := len(s.GetTransitions())
+ look := make([]*IntervalSet, count)
+ for alt := 0; alt < count; alt++ {
+
+ look[alt] = NewIntervalSet()
+ lookBusy := NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ClosureBusyCollection, "LL1Analyzer.getDecisionLookahead for lookBusy")
+ la.look1(s.GetTransitions()[alt].getTarget(), nil, BasePredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), false, false)
+
+		// Wipe out lookahead for this alternative if we found nothing,
+		// or we had a predicate when !seeThruPreds
+ if look[alt].length() == 0 || look[alt].contains(LL1AnalyzerHitPred) {
+ look[alt] = nil
+ }
+ }
+ return look
+}
+
+// Look computes the set of tokens that can follow s in the [ATN] in the
+// specified ctx.
+//
+// If ctx is nil and the end of the rule containing
+// s is reached, [TokenEpsilon] is added to the result set.
+//
+// If ctx is not nil and the end of the outermost rule is
+// reached, [TokenEOF] is added to the result set.
+//
+// Parameter s the ATN state, and stopState is the ATN state to stop at. This can be a
+// [BlockEndState] to detect epsilon paths through a closure.
+//
+// Parameter ctx is the complete parser context, or nil if the context
+// should be ignored
+//
+// The func returns the set of tokens that can follow s in the [ATN] in the
+// specified ctx.
+func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet {
+ r := NewIntervalSet()
+ var lookContext *PredictionContext
+ if ctx != nil {
+ lookContext = predictionContextFromRuleContext(s.GetATN(), ctx)
+ }
+ la.look1(s, stopState, lookContext, r, NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ClosureBusyCollection, "LL1Analyzer.Look for la.look1()"),
+ NewBitSet(), true, true)
+ return r
+}
+
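+// A minimal usage sketch for [LL1Analyzer.Look] (atn and state are
+// illustrative placeholders for a deserialized *ATN and an ATNState; they
+// are not defined in this file):
+//
+//	la := NewLL1Analyzer(atn)
+//	follow := la.Look(state, nil, nil) // tokens that can follow state, ignoring outer context
+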
+// Compute the set of tokens that can follow s in the ATN in the
+// specified ctx (the implementation behind [LL1Analyzer.Look]).
+//
+// If ctx is nil and stopState or the end of the
+// rule containing s is reached, [TokenEpsilon] is added to
+// the result set. If ctx is not nil and addEOF is
+// true and stopState or the end of the outermost rule is
+// reached, [TokenEOF] is added to the result set.
+//
+// Parameters:
+//   - s: the ATN state.
+//   - stopState: the ATN state to stop at. This can be a
+//     [BlockEndState] to detect epsilon paths through a closure.
+//   - ctx: the outer context, or nil if the outer context should
+//     not be used.
+//   - look: the result lookahead set.
+//   - lookBusy: a set used for preventing epsilon closures in the ATN
+//     from causing a stack overflow. Outside code should pass a fresh
+//     [JStore] for this argument.
+//   - calledRuleStack: a set used for preventing left recursion in the
+//     ATN from causing a stack overflow. Outside code should pass
+//     NewBitSet() for this argument.
+//   - seeThruPreds: true to treat semantic predicates as
+//     implicitly true and "see through them", otherwise false
+//     to treat semantic predicates as opaque and add [LL1AnalyzerHitPred]
+//     to the result if one is encountered.
+//   - addEOF: add [TokenEOF] to the result if the end of the
+//     outermost context is reached. This parameter has no effect if ctx
+//     is nil.
+
+func (la *LL1Analyzer) look2(_, stopState ATNState, ctx *PredictionContext, look *IntervalSet, lookBusy *JStore[*ATNConfig, Comparator[*ATNConfig]],
+ calledRuleStack *BitSet, seeThruPreds, addEOF bool, i int) {
+
+ returnState := la.atn.states[ctx.getReturnState(i)]
+ la.look1(returnState, stopState, ctx.GetParent(i), look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
+
+}
+
+func (la *LL1Analyzer) look1(s, stopState ATNState, ctx *PredictionContext, look *IntervalSet, lookBusy *JStore[*ATNConfig, Comparator[*ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool) {
+
+ c := NewATNConfig6(s, 0, ctx)
+
+ if lookBusy.Contains(c) {
+ return
+ }
+
+ _, present := lookBusy.Put(c)
+ if present {
+ return
+
+ }
+ if s == stopState {
+ if ctx == nil {
+ look.addOne(TokenEpsilon)
+ return
+ } else if ctx.isEmpty() && addEOF {
+ look.addOne(TokenEOF)
+ return
+ }
+ }
+
+ _, ok := s.(*RuleStopState)
+
+ if ok {
+ if ctx == nil {
+ look.addOne(TokenEpsilon)
+ return
+ } else if ctx.isEmpty() && addEOF {
+ look.addOne(TokenEOF)
+ return
+ }
+
+ if ctx.pcType != PredictionContextEmpty {
+ removed := calledRuleStack.contains(s.GetRuleIndex())
+ defer func() {
+ if removed {
+ calledRuleStack.add(s.GetRuleIndex())
+ }
+ }()
+ calledRuleStack.remove(s.GetRuleIndex())
+ // run thru all possible stack tops in ctx
+ for i := 0; i < ctx.length(); i++ {
+ returnState := la.atn.states[ctx.getReturnState(i)]
+ la.look2(returnState, stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, i)
+ }
+ return
+ }
+ }
+
+ n := len(s.GetTransitions())
+
+ for i := 0; i < n; i++ {
+ t := s.GetTransitions()[i]
+
+ if t1, ok := t.(*RuleTransition); ok {
+ if calledRuleStack.contains(t1.getTarget().GetRuleIndex()) {
+ continue
+ }
+
+ newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber())
+ la.look3(stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, t1)
+ } else if t2, ok := t.(AbstractPredicateTransition); ok {
+ if seeThruPreds {
+ la.look1(t2.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
+ } else {
+ look.addOne(LL1AnalyzerHitPred)
+ }
+ } else if t.getIsEpsilon() {
+ la.look1(t.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
+ } else if _, ok := t.(*WildcardTransition); ok {
+ look.addRange(TokenMinUserTokenType, la.atn.maxTokenType)
+ } else {
+ set := t.getLabel()
+ if set != nil {
+ if _, ok := t.(*NotSetTransition); ok {
+ set = set.complement(TokenMinUserTokenType, la.atn.maxTokenType)
+ }
+ look.addSet(set)
+ }
+ }
+ }
+}
+
+func (la *LL1Analyzer) look3(stopState ATNState, ctx *PredictionContext, look *IntervalSet, lookBusy *JStore[*ATNConfig, Comparator[*ATNConfig]],
+ calledRuleStack *BitSet, seeThruPreds, addEOF bool, t1 *RuleTransition) {
+
+ newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber())
+
+ defer func() {
+ calledRuleStack.remove(t1.getTarget().GetRuleIndex())
+ }()
+
+ calledRuleStack.add(t1.getTarget().GetRuleIndex())
+ la.look1(t1.getTarget(), stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
+
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/nostatistics.go b/vendor/github.com/antlr4-go/antlr/v4/nostatistics.go
new file mode 100644
index 000000000..923c7b52c
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/nostatistics.go
@@ -0,0 +1,47 @@
+//go:build !antlr.stats
+
+package antlr
+
+// This file is compiled when the build configuration antlr.stats is not enabled,
+// which then allows the compiler to optimize out all the code that is not used.
+const collectStats = false
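+
+// To compile the statistics-collecting variant of this code instead, build
+// with the antlr.stats tag, for example:
+//
+//	go build -tags antlr.stats ./...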
+
+// goRunStats is a dummy struct used when build configuration antlr.stats is not enabled.
+type goRunStats struct {
+}
+
+var Statistics = &goRunStats{}
+
+func (s *goRunStats) AddJStatRec(_ *JStatRec) {
+ // Do nothing - compiler will optimize this out (hopefully)
+}
+
+func (s *goRunStats) CollectionAnomalies() {
+ // Do nothing - compiler will optimize this out (hopefully)
+}
+
+func (s *goRunStats) Reset() {
+ // Do nothing - compiler will optimize this out (hopefully)
+}
+
+func (s *goRunStats) Report(dir string, prefix string) error {
+ // Do nothing - compiler will optimize this out (hopefully)
+ return nil
+}
+
+func (s *goRunStats) Analyze() {
+ // Do nothing - compiler will optimize this out (hopefully)
+}
+
+type statsOption func(*goRunStats) error
+
+func (s *goRunStats) Configure(options ...statsOption) error {
+ // Do nothing - compiler will optimize this out (hopefully)
+ return nil
+}
+
+func WithTopN(topN int) statsOption {
+ return func(s *goRunStats) error {
+ return nil
+ }
+}
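+
+// A short usage sketch of this functional-options API (every option in this
+// no-stats build is a no-op):
+//
+//	if err := Statistics.Configure(WithTopN(10)); err != nil {
+//		// handle the configuration error
+//	}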
diff --git a/vendor/github.com/antlr4-go/antlr/v4/parser.go b/vendor/github.com/antlr4-go/antlr/v4/parser.go
new file mode 100644
index 000000000..fb57ac15d
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/parser.go
@@ -0,0 +1,700 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strconv"
+)
+
+type Parser interface {
+ Recognizer
+
+ GetInterpreter() *ParserATNSimulator
+
+ GetTokenStream() TokenStream
+ GetTokenFactory() TokenFactory
+ GetParserRuleContext() ParserRuleContext
+ SetParserRuleContext(ParserRuleContext)
+ Consume() Token
+ GetParseListeners() []ParseTreeListener
+
+ GetErrorHandler() ErrorStrategy
+ SetErrorHandler(ErrorStrategy)
+ GetInputStream() IntStream
+ GetCurrentToken() Token
+ GetExpectedTokens() *IntervalSet
+ NotifyErrorListeners(string, Token, RecognitionException)
+ IsExpectedToken(int) bool
+ GetPrecedence() int
+ GetRuleInvocationStack(ParserRuleContext) []string
+}
+
+type BaseParser struct {
+ *BaseRecognizer
+
+ Interpreter *ParserATNSimulator
+ BuildParseTrees bool
+
+ input TokenStream
+ errHandler ErrorStrategy
+ precedenceStack IntStack
+ ctx ParserRuleContext
+
+ tracer *TraceListener
+ parseListeners []ParseTreeListener
+ _SyntaxErrors int
+}
+
+// NewBaseParser contains all the parsing support code to embed in parsers. Essentially most of it is error
+// recovery stuff.
+//
+//goland:noinspection GoUnusedExportedFunction
+func NewBaseParser(input TokenStream) *BaseParser {
+
+ p := new(BaseParser)
+
+ p.BaseRecognizer = NewBaseRecognizer()
+
+ // The input stream.
+ p.input = nil
+
+ // The error handling strategy for the parser. The default value is a new
+ // instance of {@link DefaultErrorStrategy}.
+ p.errHandler = NewDefaultErrorStrategy()
+ p.precedenceStack = make([]int, 0)
+ p.precedenceStack.Push(0)
+
+ // The ParserRuleContext object for the currently executing rule.
+	// It is always non-nil during the parsing process.
+ p.ctx = nil
+
+ // Specifies whether the parser should construct a parse tree during
+ // the parsing process. The default value is {@code true}.
+ p.BuildParseTrees = true
+
+ // When setTrace(true) is called, a reference to the
+ // TraceListener is stored here, so it can be easily removed in a
+ // later call to setTrace(false). The listener itself is
+	// implemented as a parser listener so this field is not directly used by
+ // other parser methods.
+ p.tracer = nil
+
+ // The list of ParseTreeListener listeners registered to receive
+ // events during the parse.
+ p.parseListeners = nil
+
+	// The number of syntax errors Reported during parsing. This value is
+ // incremented each time NotifyErrorListeners is called.
+ p._SyntaxErrors = 0
+ p.SetInputStream(input)
+
+ return p
+}
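+
+// A rough usage sketch: generated parsers call NewBaseParser from their own
+// constructors. MyLexer and MyParser below are illustrative placeholders
+// emitted by the ANTLR tool, not defined in this file:
+//
+//	input := NewInputStream("...")
+//	lexer := NewMyLexer(input)
+//	tokens := NewCommonTokenStream(lexer, TokenDefaultChannel)
+//	parser := NewMyParser(tokens) // embeds *BaseParser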
+
+// This field maps from the serialized ATN string to the deserialized [ATN] with
+// bypass alternatives.
+//
+// [ATNDeserializationOptions.isGenerateRuleBypassTransitions]
+//
+//goland:noinspection GoUnusedGlobalVariable
+var bypassAltsAtnCache = make(map[string]int)
+
+// reset the parser's state.
+func (p *BaseParser) reset() {
+ if p.input != nil {
+ p.input.Seek(0)
+ }
+ p.errHandler.reset(p)
+ p.ctx = nil
+ p._SyntaxErrors = 0
+ p.SetTrace(nil)
+ p.precedenceStack = make([]int, 0)
+ p.precedenceStack.Push(0)
+ if p.Interpreter != nil {
+ p.Interpreter.reset()
+ }
+}
+
+func (p *BaseParser) GetErrorHandler() ErrorStrategy {
+ return p.errHandler
+}
+
+func (p *BaseParser) SetErrorHandler(e ErrorStrategy) {
+ p.errHandler = e
+}
+
+// Match the current input symbol against ttype. If the symbol type
+// Matches, [ErrorStrategy].ReportMatch and [Consume] are
+// called to complete the Match process.
+//
+// If the symbol type does not Match,
+// [ErrorStrategy].RecoverInline is called on the current error
+// strategy to attempt recovery. If BuildParseTrees is
+// true and the token index of the symbol returned by
+// [ErrorStrategy].RecoverInline is -1, the symbol is added to
+// the parse tree by calling [ParserRuleContext].AddErrorNode.
+//
+// Match returns the Matched symbol, or nil if the current input symbol did
+// not Match ttype and the error strategy could not recover from the
+// mismatched symbol.
+
+func (p *BaseParser) Match(ttype int) Token {
+
+ t := p.GetCurrentToken()
+
+ if t.GetTokenType() == ttype {
+ p.errHandler.ReportMatch(p)
+ p.Consume()
+ } else {
+ t = p.errHandler.RecoverInline(p)
+ if p.HasError() {
+ return nil
+ }
+ if p.BuildParseTrees && t.GetTokenIndex() == -1 {
+
+ // we must have conjured up a new token during single token
+ // insertion if it's not the current symbol
+ p.ctx.AddErrorNode(t)
+ }
+ }
+
+ return t
+}
+
+// MatchWildcard Matches the current input symbol as a wildcard. If the symbol
+// type Matches (i.e. has a value greater than 0), [ErrorStrategy].ReportMatch
+// and [Consume] are called to complete the Match process.
+//
+// If the symbol type does not Match,
+// [ErrorStrategy].RecoverInline is called on the current error
+// strategy to attempt recovery. If BuildParseTrees is
+// true and the token index of the symbol returned by
+// [ErrorStrategy].RecoverInline is -1, the symbol is added to
+// the parse tree by calling [ParserRuleContext].AddErrorNode.
+//
+// MatchWildcard returns the Matched symbol.
+
+func (p *BaseParser) MatchWildcard() Token {
+ t := p.GetCurrentToken()
+ if t.GetTokenType() > 0 {
+ p.errHandler.ReportMatch(p)
+ p.Consume()
+ } else {
+ t = p.errHandler.RecoverInline(p)
+ if p.BuildParseTrees && t.GetTokenIndex() == -1 {
+ // we must have conjured up a new token during single token
+ // insertion if it's not the current symbol
+ p.ctx.AddErrorNode(t)
+ }
+ }
+ return t
+}
+
+func (p *BaseParser) GetParserRuleContext() ParserRuleContext {
+ return p.ctx
+}
+
+func (p *BaseParser) SetParserRuleContext(v ParserRuleContext) {
+ p.ctx = v
+}
+
+func (p *BaseParser) GetParseListeners() []ParseTreeListener {
+ if p.parseListeners == nil {
+ return make([]ParseTreeListener, 0)
+ }
+ return p.parseListeners
+}
+
+// AddParseListener registers listener to receive events during the parsing process.
+//
+// To support output-preserving grammar transformations (including but not
+// limited to left-recursion removal, automated left-factoring, and
+// optimized code generation), calls to listener methods during the parse
+// may differ substantially from calls made by
+// [ParseTreeWalker.DEFAULT] used after the parse is complete. In
+// particular, rule entry and exit events may occur in a different order
+// during the parse than after the parse is complete. In addition, calls to certain
+// rule entry methods may be omitted.
+//
+// With the following specific exceptions, calls to listener events are
+// deterministic, i.e. for identical input the calls to listener
+// methods will be the same.
+//
+// - Alterations to the grammar used to generate code may change the
+// behavior of the listener calls.
+// - Alterations to the command line options passed to ANTLR 4 when
+// generating the parser may change the behavior of the listener calls.
+// - Changing the version of the ANTLR Tool used to generate the parser
+// may change the behavior of the listener calls.
+func (p *BaseParser) AddParseListener(listener ParseTreeListener) {
+ if listener == nil {
+ panic("listener")
+ }
+ if p.parseListeners == nil {
+ p.parseListeners = make([]ParseTreeListener, 0)
+ }
+ p.parseListeners = append(p.parseListeners, listener)
+}
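+
+// For example (MyListener is an illustrative type implementing
+// [ParseTreeListener], not defined in this file):
+//
+//	parser.AddParseListener(&MyListener{})
+//	// listener callbacks now fire as each rule is entered and exited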
+
+// RemoveParseListener removes listener from the list of parse listeners.
+//
+// If listener is nil or has not been added as a parse
+// listener, this func does nothing.
+func (p *BaseParser) RemoveParseListener(listener ParseTreeListener) {
+
+ if p.parseListeners != nil {
+
+ idx := -1
+ for i, v := range p.parseListeners {
+ if v == listener {
+ idx = i
+ break
+ }
+ }
+
+ if idx == -1 {
+ return
+ }
+
+ // remove the listener from the slice
+ p.parseListeners = append(p.parseListeners[0:idx], p.parseListeners[idx+1:]...)
+
+ if len(p.parseListeners) == 0 {
+ p.parseListeners = nil
+ }
+ }
+}
+
+// Remove all parse listeners.
+func (p *BaseParser) removeParseListeners() {
+ p.parseListeners = nil
+}
+
+// TriggerEnterRuleEvent notifies all parse listeners of an enter rule event.
+func (p *BaseParser) TriggerEnterRuleEvent() {
+ if p.parseListeners != nil {
+ ctx := p.ctx
+ for _, listener := range p.parseListeners {
+ listener.EnterEveryRule(ctx)
+ ctx.EnterRule(listener)
+ }
+ }
+}
+
+// TriggerExitRuleEvent notifies any parse listeners of an exit rule event.
+func (p *BaseParser) TriggerExitRuleEvent() {
+ if p.parseListeners != nil {
+ // reverse order walk of listeners
+ ctx := p.ctx
+ l := len(p.parseListeners) - 1
+
+ for i := range p.parseListeners {
+ listener := p.parseListeners[l-i]
+ ctx.ExitRule(listener)
+ listener.ExitEveryRule(ctx)
+ }
+ }
+}
+
+func (p *BaseParser) GetInterpreter() *ParserATNSimulator {
+ return p.Interpreter
+}
+
+func (p *BaseParser) GetATN() *ATN {
+ return p.Interpreter.atn
+}
+
+func (p *BaseParser) GetTokenFactory() TokenFactory {
+ return p.input.GetTokenSource().GetTokenFactory()
+}
+
+// setTokenFactory is used to tell our token source and error strategy about a new way to create tokens.
+func (p *BaseParser) setTokenFactory(factory TokenFactory) {
+ p.input.GetTokenSource().setTokenFactory(factory)
+}
+
+// GetATNWithBypassAlts - the ATN with bypass alternatives is expensive to create, so we create it
+// lazily.
+func (p *BaseParser) GetATNWithBypassAlts() {
+
+ // TODO - Implement this?
+ panic("Not implemented!")
+
+ // serializedAtn := p.getSerializedATN()
+ // if (serializedAtn == nil) {
+ // panic("The current parser does not support an ATN with bypass alternatives.")
+ // }
+ // result := p.bypassAltsAtnCache[serializedAtn]
+ // if (result == nil) {
+ // deserializationOptions := NewATNDeserializationOptions(nil)
+ // deserializationOptions.generateRuleBypassTransitions = true
+ // result = NewATNDeserializer(deserializationOptions).deserialize(serializedAtn)
+ // p.bypassAltsAtnCache[serializedAtn] = result
+ // }
+ // return result
+}
+
+// compileParseTreePattern is the preferred method of getting a tree pattern.
+// For example, here's a sample use:
+//
+//
+// ParseTree t = parser.expr()
+// ParseTreePattern p = parser.compileParseTreePattern("<ID>+0",
+// MyParser.RULE_expr)
+// ParseTreeMatch m = p.Match(t)
+// String id = m.Get("ID")
+//
+
+//goland:noinspection GoUnusedParameter
+func (p *BaseParser) compileParseTreePattern(pattern, patternRuleIndex, lexer Lexer) {
+
+ panic("NewParseTreePatternMatcher not implemented!")
+ //
+ // if (lexer == nil) {
+ // if (p.GetTokenStream() != nil) {
+ // tokenSource := p.GetTokenStream().GetTokenSource()
+ // if _, ok := tokenSource.(ILexer); ok {
+ // lexer = tokenSource
+ // }
+ // }
+ // }
+ // if (lexer == nil) {
+ // panic("Parser can't discover a lexer to use")
+ // }
+
+ // m := NewParseTreePatternMatcher(lexer, p)
+ // return m.compile(pattern, patternRuleIndex)
+}
+
+func (p *BaseParser) GetInputStream() IntStream {
+ return p.GetTokenStream()
+}
+
+func (p *BaseParser) SetInputStream(input TokenStream) {
+ p.SetTokenStream(input)
+}
+
+func (p *BaseParser) GetTokenStream() TokenStream {
+ return p.input
+}
+
+// SetTokenStream installs input as the token stream and resets the parser.
+func (p *BaseParser) SetTokenStream(input TokenStream) {
+ p.input = nil
+ p.reset()
+ p.input = input
+}
+
+// GetCurrentToken returns the current token at LT(1).
+//
+// [Match] needs to return the current input symbol, which gets put
+// into the label for the associated token ref e.g., x=ID.
+func (p *BaseParser) GetCurrentToken() Token {
+ return p.input.LT(1)
+}
+
+func (p *BaseParser) NotifyErrorListeners(msg string, offendingToken Token, err RecognitionException) {
+ if offendingToken == nil {
+ offendingToken = p.GetCurrentToken()
+ }
+ p._SyntaxErrors++
+ line := offendingToken.GetLine()
+ column := offendingToken.GetColumn()
+ listener := p.GetErrorListenerDispatch()
+ listener.SyntaxError(p, offendingToken, line, column, msg, err)
+}
+
+func (p *BaseParser) Consume() Token {
+ o := p.GetCurrentToken()
+ if o.GetTokenType() != TokenEOF {
+ p.GetInputStream().Consume()
+ }
+ hasListener := p.parseListeners != nil && len(p.parseListeners) > 0
+ if p.BuildParseTrees || hasListener {
+ if p.errHandler.InErrorRecoveryMode(p) {
+ node := p.ctx.AddErrorNode(o)
+ if p.parseListeners != nil {
+ for _, l := range p.parseListeners {
+ l.VisitErrorNode(node)
+ }
+ }
+
+ } else {
+ node := p.ctx.AddTokenNode(o)
+ if p.parseListeners != nil {
+ for _, l := range p.parseListeners {
+ l.VisitTerminal(node)
+ }
+ }
+ }
+ // node.invokingState = p.state
+ }
+
+ return o
+}
+
+func (p *BaseParser) addContextToParseTree() {
+ // add current context to parent if we have a parent
+ if p.ctx.GetParent() != nil {
+ p.ctx.GetParent().(ParserRuleContext).AddChild(p.ctx)
+ }
+}
+
+func (p *BaseParser) EnterRule(localctx ParserRuleContext, state, _ int) {
+ p.SetState(state)
+ p.ctx = localctx
+ p.ctx.SetStart(p.input.LT(1))
+ if p.BuildParseTrees {
+ p.addContextToParseTree()
+ }
+ if p.parseListeners != nil {
+ p.TriggerEnterRuleEvent()
+ }
+}
+
+func (p *BaseParser) ExitRule() {
+ p.ctx.SetStop(p.input.LT(-1))
+ // trigger event on ctx, before it reverts to parent
+ if p.parseListeners != nil {
+ p.TriggerExitRuleEvent()
+ }
+ p.SetState(p.ctx.GetInvokingState())
+ if p.ctx.GetParent() != nil {
+ p.ctx = p.ctx.GetParent().(ParserRuleContext)
+ } else {
+ p.ctx = nil
+ }
+}
+
+func (p *BaseParser) EnterOuterAlt(localctx ParserRuleContext, altNum int) {
+ localctx.SetAltNumber(altNum)
+ // if we have a new localctx, make sure we replace existing ctx
+ // that is previous child of parse tree
+ if p.BuildParseTrees && p.ctx != localctx {
+ if p.ctx.GetParent() != nil {
+ p.ctx.GetParent().(ParserRuleContext).RemoveLastChild()
+ p.ctx.GetParent().(ParserRuleContext).AddChild(localctx)
+ }
+ }
+ p.ctx = localctx
+}
+
+// GetPrecedence returns the precedence level for the topmost precedence rule,
+// or -1 if the parser context is not nested within a precedence rule.
+
+func (p *BaseParser) GetPrecedence() int {
+ if len(p.precedenceStack) == 0 {
+ return -1
+ }
+
+ return p.precedenceStack[len(p.precedenceStack)-1]
+}
+
+func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, _, precedence int) {
+ p.SetState(state)
+ p.precedenceStack.Push(precedence)
+ p.ctx = localctx
+ p.ctx.SetStart(p.input.LT(1))
+ if p.parseListeners != nil {
+ p.TriggerEnterRuleEvent() // simulates rule entry for
+ // left-recursive rules
+ }
+}
+
+// PushNewRecursionContext is like [EnterRule] but for recursive rules.
+
+func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state, _ int) {
+ previous := p.ctx
+ previous.SetParent(localctx)
+ previous.SetInvokingState(state)
+ previous.SetStop(p.input.LT(-1))
+
+ p.ctx = localctx
+ p.ctx.SetStart(previous.GetStart())
+ if p.BuildParseTrees {
+ p.ctx.AddChild(previous)
+ }
+ if p.parseListeners != nil {
+ p.TriggerEnterRuleEvent() // simulates rule entry for
+ // left-recursive rules
+ }
+}
+
+func (p *BaseParser) UnrollRecursionContexts(parentCtx ParserRuleContext) {
+ _, _ = p.precedenceStack.Pop()
+ p.ctx.SetStop(p.input.LT(-1))
+ retCtx := p.ctx // save current ctx (return value)
+ // unroll so ctx is as it was before call to recursive method
+ if p.parseListeners != nil {
+ for p.ctx != parentCtx {
+ p.TriggerExitRuleEvent()
+ p.ctx = p.ctx.GetParent().(ParserRuleContext)
+ }
+ } else {
+ p.ctx = parentCtx
+ }
+ // hook into tree
+ retCtx.SetParent(parentCtx)
+ if p.BuildParseTrees && parentCtx != nil {
+ // add return ctx into invoking rule's tree
+ parentCtx.AddChild(retCtx)
+ }
+}
+
+func (p *BaseParser) GetInvokingContext(ruleIndex int) ParserRuleContext {
+ ctx := p.ctx
+ for ctx != nil {
+ if ctx.GetRuleIndex() == ruleIndex {
+ return ctx
+ }
+ ctx = ctx.GetParent().(ParserRuleContext)
+ }
+ return nil
+}
+
+func (p *BaseParser) Precpred(_ RuleContext, precedence int) bool {
+ return precedence >= p.precedenceStack[len(p.precedenceStack)-1]
+}
+
+//goland:noinspection GoUnusedParameter
+func (p *BaseParser) inContext(context ParserRuleContext) bool {
+ // TODO: useful in parser?
+ return false
+}
+
+// IsExpectedToken checks whether symbol can follow the current state in the
+// [ATN]. The behavior of this method is equivalent to the following, but is
+// implemented such that the complete context-sensitive follow set does not
+// need to be explicitly constructed.
+//
+// return getExpectedTokens().contains(symbol)
+func (p *BaseParser) IsExpectedToken(symbol int) bool {
+ atn := p.Interpreter.atn
+ ctx := p.ctx
+ s := atn.states[p.state]
+ following := atn.NextTokens(s, nil)
+ if following.contains(symbol) {
+ return true
+ }
+ if !following.contains(TokenEpsilon) {
+ return false
+ }
+ for ctx != nil && ctx.GetInvokingState() >= 0 && following.contains(TokenEpsilon) {
+ invokingState := atn.states[ctx.GetInvokingState()]
+ rt := invokingState.GetTransitions()[0]
+ following = atn.NextTokens(rt.(*RuleTransition).followState, nil)
+ if following.contains(symbol) {
+ return true
+ }
+ ctx = ctx.GetParent().(ParserRuleContext)
+ }
+ if following.contains(TokenEpsilon) && symbol == TokenEOF {
+ return true
+ }
+
+ return false
+}
+
+// GetExpectedTokens returns the set of input symbols which could follow the current parser
+// state and context, as given by [GetState] and [GetContext],
+// respectively.
+func (p *BaseParser) GetExpectedTokens() *IntervalSet {
+ return p.Interpreter.atn.getExpectedTokens(p.state, p.ctx)
+}
+
+func (p *BaseParser) GetExpectedTokensWithinCurrentRule() *IntervalSet {
+ atn := p.Interpreter.atn
+ s := atn.states[p.state]
+ return atn.NextTokens(s, nil)
+}
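+
+// As a sketch, an error handler might surface the expected set like this
+// (the message formatting is illustrative):
+//
+//	expected := parser.GetExpectedTokens()
+//	msg := "expected one of: " + expected.String()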
+
+// GetRuleIndex gets a rule's index (i.e., RULE_ruleName field) or -1 if not found.
+func (p *BaseParser) GetRuleIndex(ruleName string) int {
+ var ruleIndex, ok = p.GetRuleIndexMap()[ruleName]
+ if ok {
+ return ruleIndex
+ }
+
+ return -1
+}
+
+// GetRuleInvocationStack returns a list of the rule names in your parser instance
+// leading up to a call to the current rule. You could override if
+// you want more details such as the file/line info of where
+// in the ATN a rule is invoked.
+func (p *BaseParser) GetRuleInvocationStack(c ParserRuleContext) []string {
+ if c == nil {
+ c = p.ctx
+ }
+ stack := make([]string, 0)
+ for c != nil {
+ // compute what follows who invoked us
+ ruleIndex := c.GetRuleIndex()
+ if ruleIndex < 0 {
+ stack = append(stack, "n/a")
+ } else {
+ stack = append(stack, p.GetRuleNames()[ruleIndex])
+ }
+
+ vp := c.GetParent()
+
+ if vp == nil {
+ break
+ }
+
+ c = vp.(ParserRuleContext)
+ }
+ return stack
+}
+
+// GetDFAStrings returns a string representation of all DFA states, for debugging purposes.
+func (p *BaseParser) GetDFAStrings() string {
+ return fmt.Sprint(p.Interpreter.decisionToDFA)
+}
+
+// DumpDFA prints the whole of the DFA for debugging
+func (p *BaseParser) DumpDFA() {
+ seenOne := false
+ for _, dfa := range p.Interpreter.decisionToDFA {
+ if dfa.Len() > 0 {
+ if seenOne {
+ fmt.Println()
+ }
+ fmt.Println("Decision " + strconv.Itoa(dfa.decision) + ":")
+ fmt.Print(dfa.String(p.LiteralNames, p.SymbolicNames))
+ seenOne = true
+ }
+ }
+}
+
+func (p *BaseParser) GetSourceName() string {
+ return p.GrammarFileName
+}
+
+// SetTrace installs a trace listener for the parse.
+//
+// During a parse it is sometimes useful to listen in on the rule entry and exit
+// events as well as token Matches. This is for quick and dirty debugging.
+func (p *BaseParser) SetTrace(trace *TraceListener) {
+ if trace == nil {
+ p.RemoveParseListener(p.tracer)
+ p.tracer = nil
+ } else {
+ if p.tracer != nil {
+ p.RemoveParseListener(p.tracer)
+ }
+ p.tracer = NewTraceListener(p)
+ p.AddParseListener(p.tracer)
+ }
+}
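+
+// For instance, tracing can be toggled around a single parse. The argument
+// only needs to be non-nil to enable tracing, since SetTrace installs a
+// fresh TraceListener internally:
+//
+//	parser.SetTrace(&TraceListener{}) // enable rule entry/exit tracing
+//	// ... parse ...
+//	parser.SetTrace(nil) // remove the tracer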
diff --git a/vendor/github.com/antlr4-go/antlr/v4/parser_atn_simulator.go b/vendor/github.com/antlr4-go/antlr/v4/parser_atn_simulator.go
new file mode 100644
index 000000000..ae2869692
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/parser_atn_simulator.go
@@ -0,0 +1,1668 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// ClosureBusy is a store of ATNConfigs and is a tiny abstraction layer over
+// a standard JStore so that we can use Lazy instantiation of the JStore, mostly
+// to avoid polluting the stats module with a ton of JStore instances with nothing in them.
+type ClosureBusy struct {
+ bMap *JStore[*ATNConfig, Comparator[*ATNConfig]]
+ desc string
+}
+
+// NewClosureBusy creates a new ClosureBusy instance used to avoid infinite recursion for right-recursive rules
+func NewClosureBusy(desc string) *ClosureBusy {
+ return &ClosureBusy{
+ desc: desc,
+ }
+}
+
+func (c *ClosureBusy) Put(config *ATNConfig) (*ATNConfig, bool) {
+ if c.bMap == nil {
+ c.bMap = NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ClosureBusyCollection, c.desc)
+ }
+ return c.bMap.Put(config)
+}
+
+type ParserATNSimulator struct {
+ BaseATNSimulator
+
+ parser Parser
+ predictionMode int
+ input TokenStream
+ startIndex int
+ dfa *DFA
+ mergeCache *JPCMap
+ outerContext ParserRuleContext
+}
+
+//goland:noinspection GoUnusedExportedFunction
+func NewParserATNSimulator(parser Parser, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *ParserATNSimulator {
+
+ p := &ParserATNSimulator{
+ BaseATNSimulator: BaseATNSimulator{
+ atn: atn,
+ sharedContextCache: sharedContextCache,
+ },
+ }
+
+ p.parser = parser
+ p.decisionToDFA = decisionToDFA
+	// SLL, LL, or LL + exact ambiguity detection?
+ p.predictionMode = PredictionModeLL
+ // LAME globals to avoid parameters!!!!! I need these down deep in predTransition
+ p.input = nil
+ p.startIndex = 0
+ p.outerContext = nil
+ p.dfa = nil
+ // Each prediction operation uses a cache for merge of prediction contexts.
+ // Don't keep around as it wastes huge amounts of memory. [JPCMap]
+ // isn't Synchronized, but we're ok since two threads shouldn't reuse same
+ // parser/atn-simulator object because it can only handle one input at a time.
+ // This maps graphs a and b to merged result c. (a,b) -> c. We can avoid
+ // the merge if we ever see a and b again. Note that (b,a) -> c should
+ // also be examined during cache lookup.
+ //
+ p.mergeCache = nil
+
+ return p
+}
+
+func (p *ParserATNSimulator) GetPredictionMode() int {
+ return p.predictionMode
+}
+
+func (p *ParserATNSimulator) SetPredictionMode(v int) {
+ p.predictionMode = v
+}
+
+func (p *ParserATNSimulator) reset() {
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) AdaptivePredict(parser *BaseParser, input TokenStream, decision int, outerContext ParserRuleContext) int {
+ if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("adaptivePredict decision " + strconv.Itoa(decision) +
+ " exec LA(1)==" + p.getLookaheadName(input) +
+ " line " + strconv.Itoa(input.LT(1).GetLine()) + ":" +
+ strconv.Itoa(input.LT(1).GetColumn()))
+ }
+ p.input = input
+ p.startIndex = input.Index()
+ p.outerContext = outerContext
+
+ dfa := p.decisionToDFA[decision]
+ p.dfa = dfa
+ m := input.Mark()
+ index := input.Index()
+
+ defer func() {
+ p.dfa = nil
+ p.mergeCache = nil // whack cache after each prediction
+ // Do not attempt to run a GC now that we're done with the cache as makes the
+ // GC overhead terrible for badly formed grammars and has little effect on well formed
+ // grammars.
+ // I have made some extra effort to try and reduce memory pressure by reusing allocations when
+ // possible. However, it can only have a limited effect. The real solution is to encourage grammar
+ // authors to think more carefully about their grammar and to use the new antlr.stats tag to inspect
+ // what is happening at runtime, along with using the error listener to report ambiguities.
+
+ input.Seek(index)
+ input.Release(m)
+ }()
+
+ // Now we are certain to have a specific decision's DFA
+ // But, do we still need an initial state?
+ var s0 *DFAState
+ p.atn.stateMu.RLock()
+ if dfa.getPrecedenceDfa() {
+ p.atn.edgeMu.RLock()
+ // the start state for a precedence DFA depends on the current
+ // parser precedence, and is provided by a DFA method.
+ s0 = dfa.getPrecedenceStartState(p.parser.GetPrecedence())
+ p.atn.edgeMu.RUnlock()
+ } else {
+ // the start state for a "regular" DFA is just s0
+ s0 = dfa.getS0()
+ }
+ p.atn.stateMu.RUnlock()
+
+ if s0 == nil {
+ if outerContext == nil {
+ outerContext = ParserRuleContextEmpty
+ }
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("predictATN decision " + strconv.Itoa(dfa.decision) +
+ " exec LA(1)==" + p.getLookaheadName(input) +
+ ", outerContext=" + outerContext.String(p.parser.GetRuleNames(), nil))
+ }
+ fullCtx := false
+ s0Closure := p.computeStartState(dfa.atnStartState, ParserRuleContextEmpty, fullCtx)
+
+ p.atn.stateMu.Lock()
+ if dfa.getPrecedenceDfa() {
+			// If this is a precedence DFA, we use applyPrecedenceFilter
+ // to convert the computed start state to a precedence start
+ // state. We then use DFA.setPrecedenceStartState to set the
+ // appropriate start state for the precedence level rather
+ // than simply setting DFA.s0.
+ //
+ dfa.s0.configs = s0Closure
+ s0Closure = p.applyPrecedenceFilter(s0Closure)
+ s0 = p.addDFAState(dfa, NewDFAState(-1, s0Closure))
+ p.atn.edgeMu.Lock()
+ dfa.setPrecedenceStartState(p.parser.GetPrecedence(), s0)
+ p.atn.edgeMu.Unlock()
+ } else {
+ s0 = p.addDFAState(dfa, NewDFAState(-1, s0Closure))
+ dfa.setS0(s0)
+ }
+ p.atn.stateMu.Unlock()
+ }
+
+ alt, re := p.execATN(dfa, s0, input, index, outerContext)
+ parser.SetError(re)
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("DFA after predictATN: " + dfa.String(p.parser.GetLiteralNames(), nil))
+ }
+ return alt
+
+}
+
+// execATN performs ATN simulation to compute a predicted alternative based
+// upon the remaining input, but also updates the DFA cache to avoid
+// having to traverse the ATN again for the same input sequence.
+//
+// There are some key conditions we're looking for after computing a new
+// set of ATN configs (proposed DFA state):
+//
+// - If the set is empty, there is no viable alternative for current symbol
+// - Does the state uniquely predict an alternative?
+// - Does the state have a conflict that would prevent us from
+// putting it on the work list?
+//
+// We also have some key operations to do:
+//
+// - Add an edge from previous DFA state to potentially NewDFA state, D,
+// - Upon current symbol but only if adding to work list, which means in all
+// cases except no viable alternative (and possibly non-greedy decisions?)
+// - Collecting predicates and adding semantic context to DFA accept states
+// - adding rule context to context-sensitive DFA accept states
+// - Consuming an input symbol
+// - Reporting a conflict
+// - Reporting an ambiguity
+// - Reporting a context sensitivity
+// - Reporting insufficient predicates
+//
+// Cover these cases:
+//
+// - dead end
+// - single alt
+// - single alt + predicates
+// - conflict
+// - conflict + predicates
+//
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream, startIndex int, outerContext ParserRuleContext) (int, RecognitionException) {
+
+ if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("execATN decision " + strconv.Itoa(dfa.decision) +
+ ", DFA state " + s0.String() +
+ ", LA(1)==" + p.getLookaheadName(input) +
+ " line " + strconv.Itoa(input.LT(1).GetLine()) + ":" + strconv.Itoa(input.LT(1).GetColumn()))
+ }
+
+ previousD := s0
+
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("s0 = " + s0.String())
+ }
+ t := input.LA(1)
+ for { // for more work
+ D := p.getExistingTargetState(previousD, t)
+ if D == nil {
+ D = p.computeTargetState(dfa, previousD, t)
+ }
+ if D == ATNSimulatorError {
+ // if any configs in previous dipped into outer context, that
+ // means that input up to t actually finished entry rule
+ // at least for SLL decision. Full LL doesn't dip into outer
+ // so don't need special case.
+ // We will get an error no matter what so delay until after
+ // decision better error message. Also, no reachable target
+ // ATN states in SLL implies LL will also get nowhere.
+ // If conflict in states that dip out, choose min since we
+ // will get error no matter what.
+ e := p.noViableAlt(input, outerContext, previousD.configs, startIndex)
+ input.Seek(startIndex)
+ alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previousD.configs, outerContext)
+ if alt != ATNInvalidAltNumber {
+ return alt, nil
+ }
+ p.parser.SetError(e)
+ return ATNInvalidAltNumber, e
+ }
+ if D.requiresFullContext && p.predictionMode != PredictionModeSLL {
+ // IF PREDS, MIGHT RESOLVE TO SINGLE ALT => SLL (or syntax error)
+ conflictingAlts := D.configs.conflictingAlts
+ if D.predicates != nil {
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("DFA state has preds in DFA sim LL fail-over")
+ }
+ conflictIndex := input.Index()
+ if conflictIndex != startIndex {
+ input.Seek(startIndex)
+ }
+ conflictingAlts = p.evalSemanticContext(D.predicates, outerContext, true)
+ if conflictingAlts.length() == 1 {
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("Full LL avoided")
+ }
+ return conflictingAlts.minValue(), nil
+ }
+ if conflictIndex != startIndex {
+ // restore the index so Reporting the fallback to full
+ // context occurs with the index at the correct spot
+ input.Seek(conflictIndex)
+ }
+ }
+ if runtimeConfig.parserATNSimulatorDFADebug {
+ fmt.Println("ctx sensitive state " + outerContext.String(nil, nil) + " in " + D.String())
+ }
+ fullCtx := true
+ s0Closure := p.computeStartState(dfa.atnStartState, outerContext, fullCtx)
+ p.ReportAttemptingFullContext(dfa, conflictingAlts, D.configs, startIndex, input.Index())
+ alt, re := p.execATNWithFullContext(dfa, D, s0Closure, input, startIndex, outerContext)
+ return alt, re
+ }
+ if D.isAcceptState {
+ if D.predicates == nil {
+ return D.prediction, nil
+ }
+ stopIndex := input.Index()
+ input.Seek(startIndex)
+ alts := p.evalSemanticContext(D.predicates, outerContext, true)
+
+ switch alts.length() {
+ case 0:
+ return ATNInvalidAltNumber, p.noViableAlt(input, outerContext, D.configs, startIndex)
+ case 1:
+ return alts.minValue(), nil
+ default:
+ // Report ambiguity after predicate evaluation to make sure the correct set of ambig alts is Reported.
+ p.ReportAmbiguity(dfa, D, startIndex, stopIndex, false, alts, D.configs)
+ return alts.minValue(), nil
+ }
+ }
+ previousD = D
+
+ if t != TokenEOF {
+ input.Consume()
+ t = input.LA(1)
+ }
+ }
+}
+
+// getExistingTargetState returns an existing target state for an edge in the
+// DFA. If the target state for the edge has not yet been computed or is
+// otherwise not available, this method returns nil.
+//
+// previousD is the current DFA state and t is the next input symbol. The
+// returned value is the existing target DFA state for the given input symbol
+// t, or nil if the target state for this edge is not already cached.
+
+func (p *ParserATNSimulator) getExistingTargetState(previousD *DFAState, t int) *DFAState {
+ if t+1 < 0 {
+ return nil
+ }
+
+ p.atn.edgeMu.RLock()
+ defer p.atn.edgeMu.RUnlock()
+ edges := previousD.getEdges()
+ if edges == nil || t+1 >= len(edges) {
+ return nil
+ }
+ return previousD.getIthEdge(t + 1)
+}
+
+// computeTargetState computes a target state for an edge in the DFA, and
+// attempts to add the computed state and corresponding edge to the DFA.
+//
+// dfa is the DFA, previousD is the current DFA state, and t is the next
+// input symbol. The func returns the computed target DFA state for the given
+// input symbol t. If t does not lead to a valid DFA state, this method
+// returns [ATNSimulatorError].
+//
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) computeTargetState(dfa *DFA, previousD *DFAState, t int) *DFAState {
+ reach := p.computeReachSet(previousD.configs, t, false)
+
+ if reach == nil {
+ p.addDFAEdge(dfa, previousD, t, ATNSimulatorError)
+ return ATNSimulatorError
+ }
+ // create new target state we'll add to DFA after it's complete
+ D := NewDFAState(-1, reach)
+
+ predictedAlt := p.getUniqueAlt(reach)
+
+ if runtimeConfig.parserATNSimulatorDebug {
+ altSubSets := PredictionModegetConflictingAltSubsets(reach)
+ fmt.Println("SLL altSubSets=" + fmt.Sprint(altSubSets) +
+ ", previous=" + previousD.configs.String() +
+ ", configs=" + reach.String() +
+ ", predict=" + strconv.Itoa(predictedAlt) +
+ ", allSubsetsConflict=" +
+ fmt.Sprint(PredictionModeallSubsetsConflict(altSubSets)) +
+ ", conflictingAlts=" + p.getConflictingAlts(reach).String())
+ }
+ if predictedAlt != ATNInvalidAltNumber {
+ // NO CONFLICT, UNIQUELY PREDICTED ALT
+ D.isAcceptState = true
+ D.configs.uniqueAlt = predictedAlt
+ D.setPrediction(predictedAlt)
+ } else if PredictionModehasSLLConflictTerminatingPrediction(p.predictionMode, reach) {
+ // MORE THAN ONE VIABLE ALTERNATIVE
+ D.configs.conflictingAlts = p.getConflictingAlts(reach)
+ D.requiresFullContext = true
+		// in SLL-only mode, we will stop at this state and return the minimum alt
+ D.isAcceptState = true
+ D.setPrediction(D.configs.conflictingAlts.minValue())
+ }
+ if D.isAcceptState && D.configs.hasSemanticContext {
+ p.predicateDFAState(D, p.atn.getDecisionState(dfa.decision))
+ if D.predicates != nil {
+ D.setPrediction(ATNInvalidAltNumber)
+ }
+ }
+ // all adds to dfa are done after we've created full D state
+ D = p.addDFAEdge(dfa, previousD, t, D)
+ return D
+}
+
+func (p *ParserATNSimulator) predicateDFAState(dfaState *DFAState, decisionState DecisionState) {
+ // We need to test all predicates, even in DFA states that
+ // uniquely predict alternative.
+ nalts := len(decisionState.GetTransitions())
+ // Update DFA so reach becomes accept state with (predicate,alt)
+ // pairs if preds found for conflicting alts
+ altsToCollectPredsFrom := p.getConflictingAltsOrUniqueAlt(dfaState.configs)
+ altToPred := p.getPredsForAmbigAlts(altsToCollectPredsFrom, dfaState.configs, nalts)
+ if altToPred != nil {
+ dfaState.predicates = p.getPredicatePredictions(altsToCollectPredsFrom, altToPred)
+ dfaState.setPrediction(ATNInvalidAltNumber) // make sure we use preds
+ } else {
+ // There are preds in configs but they might go away
+ // when OR'd together like {p}? || NONE == NONE. If neither
+ // alt has preds, resolve to min alt
+ dfaState.setPrediction(altsToCollectPredsFrom.minValue())
+ }
+}
+
+// comes back with reach.uniqueAlt set to a valid alt
+//
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 *ATNConfigSet, input TokenStream, startIndex int, outerContext ParserRuleContext) (int, RecognitionException) {
+
+ if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("execATNWithFullContext " + s0.String())
+ }
+
+ fullCtx := true
+ foundExactAmbig := false
+ var reach *ATNConfigSet
+ previous := s0
+ input.Seek(startIndex)
+ t := input.LA(1)
+ predictedAlt := -1
+
+ for { // for more work
+ reach = p.computeReachSet(previous, t, fullCtx)
+ if reach == nil {
+ // if any configs in previous dipped into outer context, that
+ // means that input up to t actually finished entry rule
+ // at least for LL decision. Full LL doesn't dip into outer
+ // so don't need special case.
+ // We will get an error no matter what so delay until after
+ // decision better error message. Also, no reachable target
+ // ATN states in SLL implies LL will also get nowhere.
+ // If conflict in states that dip out, choose min since we
+ // will get error no matter what.
+ input.Seek(startIndex)
+ alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previous, outerContext)
+ if alt != ATNInvalidAltNumber {
+ return alt, nil
+ }
+ return alt, p.noViableAlt(input, outerContext, previous, startIndex)
+ }
+ altSubSets := PredictionModegetConflictingAltSubsets(reach)
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("LL altSubSets=" + fmt.Sprint(altSubSets) + ", predict=" +
+ strconv.Itoa(PredictionModegetUniqueAlt(altSubSets)) + ", resolvesToJustOneViableAlt=" +
+ fmt.Sprint(PredictionModeresolvesToJustOneViableAlt(altSubSets)))
+ }
+ reach.uniqueAlt = p.getUniqueAlt(reach)
+ // unique prediction?
+ if reach.uniqueAlt != ATNInvalidAltNumber {
+ predictedAlt = reach.uniqueAlt
+ break
+ }
+ if p.predictionMode != PredictionModeLLExactAmbigDetection {
+ predictedAlt = PredictionModeresolvesToJustOneViableAlt(altSubSets)
+ if predictedAlt != ATNInvalidAltNumber {
+ break
+ }
+ } else {
+ // In exact ambiguity mode, we never try to terminate early.
+			// Just keep scarfing until we know what the conflict is.
+ if PredictionModeallSubsetsConflict(altSubSets) && PredictionModeallSubsetsEqual(altSubSets) {
+ foundExactAmbig = true
+ predictedAlt = PredictionModegetSingleViableAlt(altSubSets)
+ break
+ }
+ // else there are multiple non-conflicting subsets or
+ // we're not sure what the ambiguity is yet.
+ // So, keep going.
+ }
+ previous = reach
+ if t != TokenEOF {
+ input.Consume()
+ t = input.LA(1)
+ }
+ }
+ // If the configuration set uniquely predicts an alternative,
+ // without conflict, then we know that it's a full LL decision
+ // not SLL.
+ if reach.uniqueAlt != ATNInvalidAltNumber {
+ p.ReportContextSensitivity(dfa, predictedAlt, reach, startIndex, input.Index())
+ return predictedAlt, nil
+ }
+ // We do not check predicates here because we have checked them
+ // on-the-fly when doing full context prediction.
+
+ //
+ // In non-exact ambiguity detection mode, we might actually be able to
+ // detect an exact ambiguity, but I'm not going to spend the cycles
+ // needed to check. We only emit ambiguity warnings in exact ambiguity
+ // mode.
+ //
+ // For example, we might know that we have conflicting configurations.
+ // But, that does not mean that there is no way forward without a
+ // conflict. It's possible to have non-conflicting alt subsets as in:
+ //
+ // altSubSets=[{1, 2}, {1, 2}, {1}, {1, 2}]
+ //
+ // from
+ //
+ // [(17,1,[5 $]), (13,1,[5 10 $]), (21,1,[5 10 $]), (11,1,[$]),
+ // (13,2,[5 10 $]), (21,2,[5 10 $]), (11,2,[$])]
+ //
+	// In this case, (17,1,[5 $]) indicates there is some next sequence that
+	// would resolve this without conflict to alternative 1. Any other viable
+ // next sequence, however, is associated with a conflict. We stop
+ // looking for input because no amount of further lookahead will alter
+ // the fact that we should predict alternative 1. We just can't say for
+ // sure that there is an ambiguity without looking further.
+
+ p.ReportAmbiguity(dfa, D, startIndex, input.Index(), foundExactAmbig, reach.Alts(), reach)
+
+ return predictedAlt, nil
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) computeReachSet(closure *ATNConfigSet, t int, fullCtx bool) *ATNConfigSet {
+ if p.mergeCache == nil {
+ p.mergeCache = NewJPCMap(ReachSetCollection, "Merge cache for computeReachSet()")
+ }
+ intermediate := NewATNConfigSet(fullCtx)
+
+ // Configurations already in a rule stop state indicate reaching the end
+ // of the decision rule (local context) or end of the start rule (full
+ // context). Once reached, these configurations are never updated by a
+ // closure operation, so they are handled separately for the performance
+ // advantage of having a smaller intermediate set when calling closure.
+ //
+ // For full-context reach operations, separate handling is required to
+ // ensure that the alternative Matching the longest overall sequence is
+ // chosen when multiple such configurations can Match the input.
+
+ var skippedStopStates []*ATNConfig
+
+ // First figure out where we can reach on input t
+ for _, c := range closure.configs {
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("testing " + p.GetTokenName(t) + " at " + c.String())
+ }
+
+ if _, ok := c.GetState().(*RuleStopState); ok {
+ if fullCtx || t == TokenEOF {
+ skippedStopStates = append(skippedStopStates, c)
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("added " + c.String() + " to SkippedStopStates")
+ }
+ }
+ continue
+ }
+
+ for _, trans := range c.GetState().GetTransitions() {
+ target := p.getReachableTarget(trans, t)
+ if target != nil {
+ cfg := NewATNConfig4(c, target)
+ intermediate.Add(cfg, p.mergeCache)
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("added " + cfg.String() + " to intermediate")
+ }
+ }
+ }
+ }
+
+ // Now figure out where the reach operation can take us...
+ var reach *ATNConfigSet
+
+ // This block optimizes the reach operation for intermediate sets which
+ // trivially indicate a termination state for the overall
+ // AdaptivePredict operation.
+ //
+ // The conditions assume that intermediate
+	// contains all configurations relevant to the reach set, but this
+ // condition is not true when one or more configurations have been
+ // withheld in SkippedStopStates, or when the current symbol is EOF.
+ //
+ if skippedStopStates == nil && t != TokenEOF {
+ if len(intermediate.configs) == 1 {
+ // Don't pursue the closure if there is just one state.
+			// It can only have one alternative; just add to result.
+ reach = intermediate
+ } else if p.getUniqueAlt(intermediate) != ATNInvalidAltNumber {
+ // Also don't pursue the closure if there is unique alternative
+ // among the configurations.
+ reach = intermediate
+ }
+ }
+ // If the reach set could not be trivially determined, perform a closure
+ // operation on the intermediate set to compute its initial value.
+ //
+ if reach == nil {
+ reach = NewATNConfigSet(fullCtx)
+ closureBusy := NewClosureBusy("ParserATNSimulator.computeReachSet() make a closureBusy")
+ treatEOFAsEpsilon := t == TokenEOF
+ amount := len(intermediate.configs)
+ for k := 0; k < amount; k++ {
+ p.closure(intermediate.configs[k], reach, closureBusy, false, fullCtx, treatEOFAsEpsilon)
+ }
+ }
+ if t == TokenEOF {
+ // After consuming EOF no additional input is possible, so we are
+ // only interested in configurations which reached the end of the
+ // decision rule (local context) or end of the start rule (full
+ // context). Update reach to contain only these configurations. This
+ // handles both explicit EOF transitions in the grammar and implicit
+ // EOF transitions following the end of the decision or start rule.
+ //
+ // When reach==intermediate, no closure operation was performed. In
+		// this case, removeAllConfigsNotInRuleStopState needs to check for
+ // reachable rule stop states as well as configurations already in
+ // a rule stop state.
+ //
+ // This is handled before the configurations in SkippedStopStates,
+ // because any configurations potentially added from that list are
+		// already guaranteed to meet this condition whether or not it is
+		// required.
+ //
+ reach = p.removeAllConfigsNotInRuleStopState(reach, reach.Equals(intermediate))
+ }
+ // If SkippedStopStates!=nil, then it contains at least one
+ // configuration. For full-context reach operations, these
+ // configurations reached the end of the start rule, in which case we
+ // only add them back to reach if no configuration during the current
+ // closure operation reached such a state. This ensures AdaptivePredict
+ // chooses an alternative Matching the longest overall sequence when
+ // multiple alternatives are viable.
+ //
+ if skippedStopStates != nil && ((!fullCtx) || (!PredictionModehasConfigInRuleStopState(reach))) {
+ for l := 0; l < len(skippedStopStates); l++ {
+ reach.Add(skippedStopStates[l], p.mergeCache)
+ }
+ }
+
+ if runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("computeReachSet " + closure.String() + " -> " + reach.String())
+ }
+
+ if len(reach.configs) == 0 {
+ return nil
+ }
+
+ return reach
+}
+
+// removeAllConfigsNotInRuleStopState returns a configuration set containing only the configurations from
+// configs which are in a [RuleStopState]. If all
+// configurations in configs are already in a rule stop state, this
+// method simply returns configs.
+//
+// When lookToEndOfRule is true, this method uses
+// [ATN].[NextTokens] for each configuration in configs which is
+// not already in a rule stop state to see if a rule stop state is reachable
+// from the configuration via epsilon-only transitions.
+//
+// The func returns configs if all configurations in configs are in a
+// rule stop state, otherwise it returns a new configuration set containing only
+// the configurations from configs which are in a rule stop state
+func (p *ParserATNSimulator) removeAllConfigsNotInRuleStopState(configs *ATNConfigSet, lookToEndOfRule bool) *ATNConfigSet {
+ if PredictionModeallConfigsInRuleStopStates(configs) {
+ return configs
+ }
+ result := NewATNConfigSet(configs.fullCtx)
+ for _, config := range configs.configs {
+ if _, ok := config.GetState().(*RuleStopState); ok {
+ result.Add(config, p.mergeCache)
+ continue
+ }
+ if lookToEndOfRule && config.GetState().GetEpsilonOnlyTransitions() {
+ NextTokens := p.atn.NextTokens(config.GetState(), nil)
+ if NextTokens.contains(TokenEpsilon) {
+ endOfRuleState := p.atn.ruleToStopState[config.GetState().GetRuleIndex()]
+ result.Add(NewATNConfig4(config, endOfRuleState), p.mergeCache)
+ }
+ }
+ }
+ return result
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) computeStartState(a ATNState, ctx RuleContext, fullCtx bool) *ATNConfigSet {
+ // always at least the implicit call to start rule
+ initialContext := predictionContextFromRuleContext(p.atn, ctx)
+ configs := NewATNConfigSet(fullCtx)
+ if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("computeStartState from ATN state " + a.String() +
+ " initialContext=" + initialContext.String())
+ }
+
+ for i := 0; i < len(a.GetTransitions()); i++ {
+ target := a.GetTransitions()[i].getTarget()
+ c := NewATNConfig6(target, i+1, initialContext)
+ closureBusy := NewClosureBusy("ParserATNSimulator.computeStartState() make a closureBusy")
+ p.closure(c, configs, closureBusy, true, fullCtx, false)
+ }
+ return configs
+}
+
+// applyPrecedenceFilter transforms the start state computed by
+// [computeStartState] to the special start state used by a
+// precedence [DFA] for a particular precedence value. The transformation
+// process applies the following changes to the start state's configuration
+// set.
+//
+// 1. Evaluate the precedence predicates for each configuration using
+// [SemanticContext].evalPrecedence.
+// 2. Remove all configurations which predict an alternative greater than
+// 1, for which another configuration that predicts alternative 1 is in the
+// same ATN state with the same prediction context.
+//
+// Transformation 2 is valid for the following reasons:
+//
+// - The closure block cannot contain any epsilon transitions which bypass
+// the body of the closure, so all states reachable via alternative 1 are
+// part of the precedence alternatives of the transformed left-recursive
+// rule.
+// - The "primary" portion of a left recursive rule cannot contain an
+// epsilon transition, so the only way an alternative other than 1 can exist
+// in a state that is also reachable via alternative 1 is by nesting calls
+// to the left-recursive rule, with the outer calls not being at the
+// preferred precedence level.
+//
+// The prediction context must be considered by this filter to address
+// situations like the following:
+//
+// grammar TA
+// prog: statement* EOF
+// statement: letterA | statement letterA 'b'
+// letterA: 'a'
+//
+// In the above grammar, the [ATN] state immediately before the token
+// reference 'a' in letterA is reachable from the left edge
+// of both the primary and closure blocks of the left-recursive rule
+// statement. The prediction context associated with each of these
+// configurations distinguishes between them, and prevents the alternative
+// which stepped out to prog, and then back in to statement
+// from being eliminated by the filter.
+//
+// The func returns the transformed configuration set representing the start state
+// for a precedence [DFA] at a particular precedence level (determined by
+// calling [Parser].getPrecedence).
+func (p *ParserATNSimulator) applyPrecedenceFilter(configs *ATNConfigSet) *ATNConfigSet {
+
+ statesFromAlt1 := make(map[int]*PredictionContext)
+ configSet := NewATNConfigSet(configs.fullCtx)
+
+ for _, config := range configs.configs {
+ // handle alt 1 first
+ if config.GetAlt() != 1 {
+ continue
+ }
+ updatedContext := config.GetSemanticContext().evalPrecedence(p.parser, p.outerContext)
+ if updatedContext == nil {
+ // the configuration was eliminated
+ continue
+ }
+ statesFromAlt1[config.GetState().GetStateNumber()] = config.GetContext()
+ if updatedContext != config.GetSemanticContext() {
+ configSet.Add(NewATNConfig2(config, updatedContext), p.mergeCache)
+ } else {
+ configSet.Add(config, p.mergeCache)
+ }
+ }
+ for _, config := range configs.configs {
+
+ if config.GetAlt() == 1 {
+ // already handled
+ continue
+ }
+		// In the future, this elimination step could be updated to also
+ // filter the prediction context for alternatives predicting alt>1
+ // (basically a graph subtraction algorithm).
+ if !config.getPrecedenceFilterSuppressed() {
+ context := statesFromAlt1[config.GetState().GetStateNumber()]
+ if context != nil && context.Equals(config.GetContext()) {
+ // eliminated
+ continue
+ }
+ }
+ configSet.Add(config, p.mergeCache)
+ }
+ return configSet
+}
+
+func (p *ParserATNSimulator) getReachableTarget(trans Transition, ttype int) ATNState {
+ if trans.Matches(ttype, 0, p.atn.maxTokenType) {
+ return trans.getTarget()
+ }
+
+ return nil
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) getPredsForAmbigAlts(ambigAlts *BitSet, configs *ATNConfigSet, nalts int) []SemanticContext {
+
+ altToPred := make([]SemanticContext, nalts+1)
+ for _, c := range configs.configs {
+ if ambigAlts.contains(c.GetAlt()) {
+ altToPred[c.GetAlt()] = SemanticContextorContext(altToPred[c.GetAlt()], c.GetSemanticContext())
+ }
+ }
+ nPredAlts := 0
+ for i := 1; i <= nalts; i++ {
+ pred := altToPred[i]
+ if pred == nil {
+ altToPred[i] = SemanticContextNone
+ } else if pred != SemanticContextNone {
+ nPredAlts++
+ }
+ }
+ // unambiguous alts are nil in altToPred
+ if nPredAlts == 0 {
+ altToPred = nil
+ }
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("getPredsForAmbigAlts result " + fmt.Sprint(altToPred))
+ }
+ return altToPred
+}
+
+func (p *ParserATNSimulator) getPredicatePredictions(ambigAlts *BitSet, altToPred []SemanticContext) []*PredPrediction {
+ pairs := make([]*PredPrediction, 0)
+ containsPredicate := false
+ for i := 1; i < len(altToPred); i++ {
+ pred := altToPred[i]
+ // un-predicated is indicated by SemanticContextNONE
+ if ambigAlts != nil && ambigAlts.contains(i) {
+ pairs = append(pairs, NewPredPrediction(pred, i))
+ }
+ if pred != SemanticContextNone {
+ containsPredicate = true
+ }
+ }
+ if !containsPredicate {
+ return nil
+ }
+ return pairs
+}
+
+// getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule is used to improve the localization of error messages by
+// choosing an alternative rather than panicking with a NoViableAltException in particular prediction scenarios where the
+// Error state was reached during [ATN] simulation.
+//
+// The default implementation of this method uses the following
+// algorithm to identify an [ATN] configuration which successfully parsed the
+// decision entry rule. Choosing such an alternative ensures that the
+// [ParserRuleContext] returned by the calling rule will be complete
+// and valid, and the syntax error will be Reported later at a more
+// localized location.
+//
+// - If a syntactically valid path or paths reach the end of the decision rule, and
+// they are semantically valid if predicated, return the min associated alt.
+// - Else, if one or more semantically invalid but syntactically valid paths
+// exist, return the minimum associated alt.
+// - Otherwise, return [ATNInvalidAltNumber].
+//
+// In some scenarios, the algorithm described above could predict an
+// alternative which will result in a [FailedPredicateException] in
+// the parser. Specifically, this could occur if the only configuration
+// capable of successfully parsing to the end of the decision rule is
+// blocked by a semantic predicate. By choosing this alternative within
+// [AdaptivePredict] instead of panicking with a [NoViableAltException], the resulting
+// [FailedPredicateException] in the parser will identify the specific
+// predicate which is preventing the parser from successfully parsing the
+// decision rule, which helps developers identify and correct logic errors
+// in semantic predicates.
+//
+// pass in the configs holding ATN configurations which were valid immediately before
+// the ERROR state was reached, outerContext as the initial parser context from the paper
+// or the parser stack at the instant before prediction commences.
+//
+// The func returns the value to return from [AdaptivePredict], or
+// [ATNInvalidAltNumber] if a suitable alternative was not
+// identified and [AdaptivePredict] should report an error instead.
+func (p *ParserATNSimulator) getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(configs *ATNConfigSet, outerContext ParserRuleContext) int {
+ cfgs := p.splitAccordingToSemanticValidity(configs, outerContext)
+ semValidConfigs := cfgs[0]
+ semInvalidConfigs := cfgs[1]
+ alt := p.GetAltThatFinishedDecisionEntryRule(semValidConfigs)
+ if alt != ATNInvalidAltNumber { // semantically/syntactically viable path exists
+ return alt
+ }
+ // Is there a syntactically valid path with a failed pred?
+ if len(semInvalidConfigs.configs) > 0 {
+ alt = p.GetAltThatFinishedDecisionEntryRule(semInvalidConfigs)
+ if alt != ATNInvalidAltNumber { // syntactically viable path exists
+ return alt
+ }
+ }
+ return ATNInvalidAltNumber
+}
+
+func (p *ParserATNSimulator) GetAltThatFinishedDecisionEntryRule(configs *ATNConfigSet) int {
+ alts := NewIntervalSet()
+
+ for _, c := range configs.configs {
+ _, ok := c.GetState().(*RuleStopState)
+
+ if c.GetReachesIntoOuterContext() > 0 || (ok && c.GetContext().hasEmptyPath()) {
+ alts.addOne(c.GetAlt())
+ }
+ }
+ if alts.length() == 0 {
+ return ATNInvalidAltNumber
+ }
+
+ return alts.first()
+}
+
+// Walk the list of configurations and split them according to
+// those that have preds evaluating to true/false. If no pred, assume
+// true pred and include in the succeeded set. Returns a pair of sets.
+//
+// Create a new set so as not to alter the incoming parameter.
+//
+// Assumption: the input stream has been restored to the starting point for
+// prediction, which is where predicates need to be evaluated.
+
+type ATNConfigSetPair struct {
+ item0, item1 *ATNConfigSet
+}
+
+func (p *ParserATNSimulator) splitAccordingToSemanticValidity(configs *ATNConfigSet, outerContext ParserRuleContext) []*ATNConfigSet {
+ succeeded := NewATNConfigSet(configs.fullCtx)
+ failed := NewATNConfigSet(configs.fullCtx)
+
+ for _, c := range configs.configs {
+ if c.GetSemanticContext() != SemanticContextNone {
+ predicateEvaluationResult := c.GetSemanticContext().evaluate(p.parser, outerContext)
+ if predicateEvaluationResult {
+ succeeded.Add(c, nil)
+ } else {
+ failed.Add(c, nil)
+ }
+ } else {
+ succeeded.Add(c, nil)
+ }
+ }
+ return []*ATNConfigSet{succeeded, failed}
+}
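+
+// A sketch of the split above (hypothetical configs, not runtime API): given
+// {c1 with no predicate, c2 with predicate pi}, c1 always lands in succeeded;
+// c2 lands in succeeded if pi evaluates true against outerContext, otherwise
+// in failed.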
+
+// evalSemanticContext looks through a list of predicate/alt pairs, returning alts for the
+// pairs that win. A [SemanticContextNone] predicate indicates an alt containing an
+// un-predicated config, which behaves as "always true." If !complete
+// then we stop at the first predicate that evaluates to true. This
+// includes pairs with nil predicates.
+//
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) evalSemanticContext(predPredictions []*PredPrediction, outerContext ParserRuleContext, complete bool) *BitSet {
+ predictions := NewBitSet()
+ for i := 0; i < len(predPredictions); i++ {
+ pair := predPredictions[i]
+ if pair.pred == SemanticContextNone {
+ predictions.add(pair.alt)
+ if !complete {
+ break
+ }
+ continue
+ }
+
+ predicateEvaluationResult := pair.pred.evaluate(p.parser, outerContext)
+ if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorDFADebug {
+ fmt.Println("eval pred " + pair.String() + "=" + fmt.Sprint(predicateEvaluationResult))
+ }
+ if predicateEvaluationResult {
+ if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorDFADebug {
+ fmt.Println("PREDICT " + fmt.Sprint(pair.alt))
+ }
+ predictions.add(pair.alt)
+ if !complete {
+ break
+ }
+ }
+ }
+ return predictions
+}
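+
+// A sketch of the loop above: with predPredictions = [(pi, alt 1),
+// (SemanticContextNone, alt 2)] and complete=false, evaluation stops at the
+// first winner: if pi evaluates true, predictions == {1}; otherwise the
+// un-predicated pair wins and predictions == {2}. With complete=true the alts
+// of every winning pair are collected.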
+
+func (p *ParserATNSimulator) closure(config *ATNConfig, configs *ATNConfigSet, closureBusy *ClosureBusy, collectPredicates, fullCtx, treatEOFAsEpsilon bool) {
+ initialDepth := 0
+ p.closureCheckingStopState(config, configs, closureBusy, collectPredicates,
+ fullCtx, initialDepth, treatEOFAsEpsilon)
+}
+
+func (p *ParserATNSimulator) closureCheckingStopState(config *ATNConfig, configs *ATNConfigSet, closureBusy *ClosureBusy, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) {
+ if runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("closure(" + config.String() + ")")
+ }
+
+ var stack []*ATNConfig
+ visited := make(map[*ATNConfig]bool)
+
+ stack = append(stack, config)
+
+ for len(stack) > 0 {
+ currConfig := stack[len(stack)-1]
+ stack = stack[:len(stack)-1]
+
+ if _, ok := visited[currConfig]; ok {
+ continue
+ }
+ visited[currConfig] = true
+
+ if _, ok := currConfig.GetState().(*RuleStopState); ok {
+ // We hit rule end. If we have context info, use it
+ // run thru all possible stack tops in ctx
+ if !currConfig.GetContext().isEmpty() {
+ for i := 0; i < currConfig.GetContext().length(); i++ {
+ if currConfig.GetContext().getReturnState(i) == BasePredictionContextEmptyReturnState {
+ if fullCtx {
+ nb := NewATNConfig1(currConfig, currConfig.GetState(), BasePredictionContextEMPTY)
+ configs.Add(nb, p.mergeCache)
+ continue
+ } else {
+ // we have no context info, just chase follow links (if greedy)
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("FALLING off rule " + p.getRuleName(currConfig.GetState().GetRuleIndex()))
+ }
+ p.closureWork(currConfig, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon)
+ }
+ continue
+ }
+ returnState := p.atn.states[currConfig.GetContext().getReturnState(i)]
+ newContext := currConfig.GetContext().GetParent(i) // "pop" return state
+
+ c := NewATNConfig5(returnState, currConfig.GetAlt(), newContext, currConfig.GetSemanticContext())
+ // While we have context to pop back from, we may have
+				// gotten that context AFTER having fallen off a rule.
+ // Make sure we track that we are now out of context.
+ c.SetReachesIntoOuterContext(currConfig.GetReachesIntoOuterContext())
+
+ stack = append(stack, c)
+ }
+ continue
+ } else if fullCtx {
+ // reached end of start rule
+ configs.Add(currConfig, p.mergeCache)
+ continue
+ } else {
+ // else if we have no context info, just chase follow links (if greedy)
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("FALLING off rule " + p.getRuleName(currConfig.GetState().GetRuleIndex()))
+ }
+ }
+ }
+
+ p.closureWork(currConfig, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon)
+ }
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) closureCheckingStopStateRecursive(config *ATNConfig, configs *ATNConfigSet, closureBusy *ClosureBusy, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) {
+ if runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("closure(" + config.String() + ")")
+ }
+
+ if _, ok := config.GetState().(*RuleStopState); ok {
+ // We hit rule end. If we have context info, use it
+ // run thru all possible stack tops in ctx
+ if !config.GetContext().isEmpty() {
+ for i := 0; i < config.GetContext().length(); i++ {
+ if config.GetContext().getReturnState(i) == BasePredictionContextEmptyReturnState {
+ if fullCtx {
+ nb := NewATNConfig1(config, config.GetState(), BasePredictionContextEMPTY)
+ configs.Add(nb, p.mergeCache)
+ continue
+ } else {
+ // we have no context info, just chase follow links (if greedy)
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex()))
+ }
+ p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon)
+ }
+ continue
+ }
+ returnState := p.atn.states[config.GetContext().getReturnState(i)]
+ newContext := config.GetContext().GetParent(i) // "pop" return state
+
+ c := NewATNConfig5(returnState, config.GetAlt(), newContext, config.GetSemanticContext())
+ // While we have context to pop back from, we may have
+			// gotten that context AFTER having fallen off a rule.
+ // Make sure we track that we are now out of context.
+ c.SetReachesIntoOuterContext(config.GetReachesIntoOuterContext())
+ p.closureCheckingStopState(c, configs, closureBusy, collectPredicates, fullCtx, depth-1, treatEOFAsEpsilon)
+ }
+ return
+ } else if fullCtx {
+ // reached end of start rule
+ configs.Add(config, p.mergeCache)
+ return
+ } else {
+ // else if we have no context info, just chase follow links (if greedy)
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex()))
+ }
+ }
+ }
+ p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon)
+}
+
+// Do the actual work of walking epsilon edges
+//
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) closureWork(config *ATNConfig, configs *ATNConfigSet, closureBusy *ClosureBusy, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) {
+ state := config.GetState()
+ // optimization
+ if !state.GetEpsilonOnlyTransitions() {
+ configs.Add(config, p.mergeCache)
+ // make sure to not return here, because EOF transitions can act as
+ // both epsilon transitions and non-epsilon transitions.
+ }
+ for i := 0; i < len(state.GetTransitions()); i++ {
+ if i == 0 && p.canDropLoopEntryEdgeInLeftRecursiveRule(config) {
+ continue
+ }
+
+ t := state.GetTransitions()[i]
+ _, ok := t.(*ActionTransition)
+ continueCollecting := collectPredicates && !ok
+ c := p.getEpsilonTarget(config, t, continueCollecting, depth == 0, fullCtx, treatEOFAsEpsilon)
+ if c != nil {
+ newDepth := depth
+
+ if _, ok := config.GetState().(*RuleStopState); ok {
+				// target fell off end of rule; mark resulting c as having dipped into outer context
+ // We can't get here if incoming config was rule stop and we had context
+ // track how far we dip into outer context. Might
+ // come in handy and we avoid evaluating context dependent
+ // preds if this is > 0.
+
+ if p.dfa != nil && p.dfa.getPrecedenceDfa() {
+ if t.(*EpsilonTransition).outermostPrecedenceReturn == p.dfa.atnStartState.GetRuleIndex() {
+ c.setPrecedenceFilterSuppressed(true)
+ }
+ }
+
+ c.SetReachesIntoOuterContext(c.GetReachesIntoOuterContext() + 1)
+
+ _, present := closureBusy.Put(c)
+ if present {
+ // avoid infinite recursion for right-recursive rules
+ continue
+ }
+
+ configs.dipsIntoOuterContext = true // TODO: can remove? only care when we add to set per middle of this method
+ newDepth--
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("dips into outer ctx: " + c.String())
+ }
+ } else {
+
+ if !t.getIsEpsilon() {
+ _, present := closureBusy.Put(c)
+ if present {
+ // avoid infinite recursion for EOF* and EOF+
+ continue
+ }
+ }
+ if _, ok := t.(*RuleTransition); ok {
+ // latch when newDepth goes negative - once we step out of the entry context we can't return
+ if newDepth >= 0 {
+ newDepth++
+ }
+ }
+ }
+ p.closureCheckingStopState(c, configs, closureBusy, continueCollecting, fullCtx, newDepth, treatEOFAsEpsilon)
+ }
+ }
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) canDropLoopEntryEdgeInLeftRecursiveRule(config *ATNConfig) bool {
+ if !runtimeConfig.lRLoopEntryBranchOpt {
+ return false
+ }
+
+ _p := config.GetState()
+
+ // First check to see if we are in StarLoopEntryState generated during
+ // left-recursion elimination. For efficiency, also check if
+ // the context has an empty stack case. If so, it would mean
+ // global FOLLOW so we can't perform optimization
+ if _p.GetStateType() != ATNStateStarLoopEntry {
+ return false
+ }
+ startLoop, ok := _p.(*StarLoopEntryState)
+ if !ok {
+ return false
+ }
+ if !startLoop.precedenceRuleDecision ||
+ config.GetContext().isEmpty() ||
+ config.GetContext().hasEmptyPath() {
+ return false
+ }
+
+ // Require all return states to return back to the same rule
+ // that p is in.
+ numCtxs := config.GetContext().length()
+ for i := 0; i < numCtxs; i++ {
+ returnState := p.atn.states[config.GetContext().getReturnState(i)]
+ if returnState.GetRuleIndex() != _p.GetRuleIndex() {
+ return false
+ }
+ }
+ x := _p.GetTransitions()[0].getTarget()
+ decisionStartState := x.(BlockStartState)
+ blockEndStateNum := decisionStartState.getEndState().stateNumber
+ blockEndState := p.atn.states[blockEndStateNum].(*BlockEndState)
+
+ // Verify that the top of each stack context leads to loop entry/exit
+ // state through epsilon edges and w/o leaving rule.
+
+ for i := 0; i < numCtxs; i++ { // for each stack context
+ returnStateNumber := config.GetContext().getReturnState(i)
+ returnState := p.atn.states[returnStateNumber]
+
+ // all states must have single outgoing epsilon edge
+ if len(returnState.GetTransitions()) != 1 || !returnState.GetTransitions()[0].getIsEpsilon() {
+ return false
+ }
+
+ // Look for prefix op case like 'not expr', (' type ')' expr
+ returnStateTarget := returnState.GetTransitions()[0].getTarget()
+ if returnState.GetStateType() == ATNStateBlockEnd && returnStateTarget == _p {
+ continue
+ }
+
+ // Look for 'expr op expr' or case where expr's return state is block end
+ // of (...)* internal block; the block end points to loop back
+ // which points to p but we don't need to check that
+ if returnState == blockEndState {
+ continue
+ }
+
+ // Look for ternary expr ? expr : expr. The return state points at block end,
+ // which points at loop entry state
+ if returnStateTarget == blockEndState {
+ continue
+ }
+
+ // Look for complex prefix 'between expr and expr' case where 2nd expr's
+ // return state points at block end state of (...)* internal block
+ if returnStateTarget.GetStateType() == ATNStateBlockEnd &&
+ len(returnStateTarget.GetTransitions()) == 1 &&
+ returnStateTarget.GetTransitions()[0].getIsEpsilon() &&
+ returnStateTarget.GetTransitions()[0].getTarget() == _p {
+ continue
+ }
+
+ // anything else ain't conforming
+ return false
+ }
+
+ return true
+}
+
+func (p *ParserATNSimulator) getRuleName(index int) string {
+ if p.parser != nil && index >= 0 {
+ return p.parser.GetRuleNames()[index]
+ }
+ var sb strings.Builder
+ sb.Grow(32)
+
+	sb.WriteString("<rule ")
+	sb.WriteString(strconv.FormatInt(int64(index), 10))
+	sb.WriteString(">")
+ return sb.String()
+}
+
+func (p *ParserATNSimulator) getEpsilonTarget(config *ATNConfig, t Transition, collectPredicates, inContext, fullCtx, treatEOFAsEpsilon bool) *ATNConfig {
+
+ switch t.getSerializationType() {
+ case TransitionRULE:
+ return p.ruleTransition(config, t.(*RuleTransition))
+ case TransitionPRECEDENCE:
+ return p.precedenceTransition(config, t.(*PrecedencePredicateTransition), collectPredicates, inContext, fullCtx)
+ case TransitionPREDICATE:
+ return p.predTransition(config, t.(*PredicateTransition), collectPredicates, inContext, fullCtx)
+ case TransitionACTION:
+ return p.actionTransition(config, t.(*ActionTransition))
+ case TransitionEPSILON:
+ return NewATNConfig4(config, t.getTarget())
+ case TransitionATOM, TransitionRANGE, TransitionSET:
+ // EOF transitions act like epsilon transitions after the first EOF
+ // transition is traversed
+ if treatEOFAsEpsilon {
+ if t.Matches(TokenEOF, 0, 1) {
+ return NewATNConfig4(config, t.getTarget())
+ }
+ }
+ return nil
+ default:
+ return nil
+ }
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) actionTransition(config *ATNConfig, t *ActionTransition) *ATNConfig {
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("ACTION edge " + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex))
+ }
+ return NewATNConfig4(config, t.getTarget())
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) precedenceTransition(config *ATNConfig,
+ pt *PrecedencePredicateTransition, collectPredicates, inContext, fullCtx bool) *ATNConfig {
+
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " +
+ strconv.Itoa(pt.precedence) + ">=_p, ctx dependent=true")
+ if p.parser != nil {
+ fmt.Println("context surrounding pred is " + fmt.Sprint(p.parser.GetRuleInvocationStack(nil)))
+ }
+ }
+ var c *ATNConfig
+ if collectPredicates && inContext {
+ if fullCtx {
+ // In full context mode, we can evaluate predicates on-the-fly
+ // during closure, which dramatically reduces the size of
+			// the config sets. It also obviates the need to test predicates
+ // later during conflict resolution.
+ currentPosition := p.input.Index()
+ p.input.Seek(p.startIndex)
+ predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext)
+ p.input.Seek(currentPosition)
+ if predSucceeds {
+ c = NewATNConfig4(config, pt.getTarget()) // no pred context
+ }
+ } else {
+ newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate())
+ c = NewATNConfig3(config, pt.getTarget(), newSemCtx)
+ }
+ } else {
+ c = NewATNConfig4(config, pt.getTarget())
+ }
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("runtimeConfig from pred transition=" + c.String())
+ }
+ return c
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) predTransition(config *ATNConfig, pt *PredicateTransition, collectPredicates, inContext, fullCtx bool) *ATNConfig {
+
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " + strconv.Itoa(pt.ruleIndex) +
+ ":" + strconv.Itoa(pt.predIndex) + ", ctx dependent=" + fmt.Sprint(pt.isCtxDependent))
+ if p.parser != nil {
+ fmt.Println("context surrounding pred is " + fmt.Sprint(p.parser.GetRuleInvocationStack(nil)))
+ }
+ }
+ var c *ATNConfig
+ if collectPredicates && (!pt.isCtxDependent || inContext) {
+ if fullCtx {
+ // In full context mode, we can evaluate predicates on-the-fly
+ // during closure, which dramatically reduces the size of
+ // the config sets. It also obviates the need to test predicates
+ // later during conflict resolution.
+ currentPosition := p.input.Index()
+ p.input.Seek(p.startIndex)
+ predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext)
+ p.input.Seek(currentPosition)
+ if predSucceeds {
+ c = NewATNConfig4(config, pt.getTarget()) // no pred context
+ }
+ } else {
+ newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate())
+ c = NewATNConfig3(config, pt.getTarget(), newSemCtx)
+ }
+ } else {
+ c = NewATNConfig4(config, pt.getTarget())
+ }
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("config from pred transition=" + c.String())
+ }
+ return c
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) ruleTransition(config *ATNConfig, t *RuleTransition) *ATNConfig {
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("CALL rule " + p.getRuleName(t.getTarget().GetRuleIndex()) + ", ctx=" + config.GetContext().String())
+ }
+ returnState := t.followState
+ newContext := SingletonBasePredictionContextCreate(config.GetContext(), returnState.GetStateNumber())
+ return NewATNConfig1(config, t.getTarget(), newContext)
+}
+
+func (p *ParserATNSimulator) getConflictingAlts(configs *ATNConfigSet) *BitSet {
+ altsets := PredictionModegetConflictingAltSubsets(configs)
+ return PredictionModeGetAlts(altsets)
+}
+
+// getConflictingAltsOrUniqueAlt Sam pointed out a problem with the previous definition, v3, of
+// ambiguous states. If we have another state associated with conflicting
+// alternatives, we should keep going. For example, the following grammar
+//
+// s : (ID | ID ID?) ;
+//
+// When the [ATN] simulation reaches the state before ;, it has a [DFA]
+// state that looks like:
+//
+// [12|1|[], 6|2|[], 12|2|[]].
+//
+// Naturally
+//
+// 12|1|[] and 12|2|[]
+//
+// conflict, but we cannot stop processing this node
+// because alternative two has another way to continue, via
+//
+// [6|2|[]].
+//
+// The key is that we have a single state that has config's only associated
+// with a single alternative, 2, and crucially the state transitions
+// among the configurations are all non-epsilon transitions. That means
+// we don't consider any conflicts that include alternative 2. So, we
+// ignore the conflict between alts 1 and 2. We ignore a set of
+// conflicting alts when there is an intersection with an alternative
+// associated with a single alt state in the state config-list map.
+//
+// It's also the case that we might have two conflicting configurations but
+// also a 3rd non-conflicting configuration for a different alternative:
+//
+// [1|1|[], 1|2|[], 8|3|[]].
+//
+// This can come about from grammar:
+//
+// a : A | A | A B
+//
+// After Matching input A, we reach the stop state for rule A, state 1.
+// State 8 is the state right before B. Clearly alternatives 1 and 2
+// conflict and no amount of further lookahead will separate the two.
+// However, alternative 3 will be able to continue, so we do not
+// stop working on this state.
+//
+// In the previous example, we're concerned
+// with states associated with the conflicting alternatives. Here alt
+// 3 is not associated with the conflicting configs, but since we can continue
+// looking for input reasonably, I don't declare the state done. We
+// ignore a set of conflicting alts when we have an alternative
+// that we still need to pursue.
+func (p *ParserATNSimulator) getConflictingAltsOrUniqueAlt(configs *ATNConfigSet) *BitSet {
+ var conflictingAlts *BitSet
+ if configs.uniqueAlt != ATNInvalidAltNumber {
+ conflictingAlts = NewBitSet()
+ conflictingAlts.add(configs.uniqueAlt)
+ } else {
+ conflictingAlts = configs.conflictingAlts
+ }
+ return conflictingAlts
+}
+
+func (p *ParserATNSimulator) GetTokenName(t int) string {
+ if t == TokenEOF {
+ return "EOF"
+ }
+
+ if p.parser != nil && p.parser.GetLiteralNames() != nil && t < len(p.parser.GetLiteralNames()) {
+ return p.parser.GetLiteralNames()[t] + "<" + strconv.Itoa(t) + ">"
+ }
+
+	if p.parser != nil && p.parser.GetSymbolicNames() != nil && t < len(p.parser.GetSymbolicNames()) {
+ return p.parser.GetSymbolicNames()[t] + "<" + strconv.Itoa(t) + ">"
+ }
+
+ return strconv.Itoa(t)
+}
+
+func (p *ParserATNSimulator) getLookaheadName(input TokenStream) string {
+ return p.GetTokenName(input.LA(1))
+}
+
+// Used for debugging in [AdaptivePredict] around [execATN], but I cut
+// it out for clarity now that alg. works well. We can leave this
+// "dead" code for a bit.
+func (p *ParserATNSimulator) dumpDeadEndConfigs(_ *NoViableAltException) {
+
+ panic("Not implemented")
+
+ // fmt.Println("dead end configs: ")
+ // var decs = nvae.deadEndConfigs
+ //
+	// for i := 0; i < len(decs); i++ {
+	//	c := decs[i]
+	//	var trans = "no edges"
+	//	if len(c.state.GetTransitions()) > 0 {
+	//		var t = c.state.GetTransitions()[0]
+	//		if t2, ok := t.(*AtomTransition); ok {
+	//			trans = "Atom " + p.GetTokenName(t2.label)
+	//		} else if t3, ok := t.(SetTransition); ok {
+	//			_, ok := t.(*NotSetTransition)
+	//
+	//			var s string
+	//			if ok {
+	//				s = "~"
+	//			}
+	//
+	//			trans = s + "Set " + t3.set
+	//		}
+	//	}
+	//	fmt.Errorf(c.String(p.parser, true) + ":" + trans)
+	// }
+}
+
+func (p *ParserATNSimulator) noViableAlt(input TokenStream, outerContext ParserRuleContext, configs *ATNConfigSet, startIndex int) *NoViableAltException {
+ return NewNoViableAltException(p.parser, input, input.Get(startIndex), input.LT(1), configs, outerContext)
+}
+
+func (p *ParserATNSimulator) getUniqueAlt(configs *ATNConfigSet) int {
+ alt := ATNInvalidAltNumber
+ for _, c := range configs.configs {
+ if alt == ATNInvalidAltNumber {
+ alt = c.GetAlt() // found first alt
+ } else if c.GetAlt() != alt {
+ return ATNInvalidAltNumber
+ }
+ }
+ return alt
+}
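+
+// A sketch of the rule above: for configs predicting alts {1, 1, 1},
+// getUniqueAlt returns 1; for {1, 2} it returns ATNInvalidAltNumber,
+// signalling that prediction has not yet converged on a single alternative.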
+
+// addDFAEdge adds an edge to the [DFA], if possible. This method calls
+// addDFAState to ensure the to state is present in the
+// [DFA]. If from is nil, or if t is outside the
+// range of edges that can be represented in the [DFA] tables, this method
+// returns without adding the edge to the [DFA].
+//
+// If to is nil, this method returns nil.
+// Otherwise, it returns the [DFAState] returned by calling
+// addDFAState for the to state.
+//
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) addDFAEdge(dfa *DFA, from *DFAState, t int, to *DFAState) *DFAState {
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + p.GetTokenName(t))
+ }
+ if to == nil {
+ return nil
+ }
+ p.atn.stateMu.Lock()
+ to = p.addDFAState(dfa, to) // used existing if possible not incoming
+ p.atn.stateMu.Unlock()
+ if from == nil || t < -1 || t > p.atn.maxTokenType {
+ return to
+ }
+ p.atn.edgeMu.Lock()
+ if from.getEdges() == nil {
+ from.setEdges(make([]*DFAState, p.atn.maxTokenType+1+1))
+ }
+ from.setIthEdge(t+1, to) // connect
+ p.atn.edgeMu.Unlock()
+
+ if runtimeConfig.parserATNSimulatorDebug {
+ var names []string
+ if p.parser != nil {
+ names = p.parser.GetLiteralNames()
+ }
+
+ fmt.Println("DFA=\n" + dfa.String(names, nil))
+ }
+ return to
+}
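+
+// A note on the indexing above: edges are stored at t+1 so that t == -1
+// (TokenEOF) maps to slot 0, token type 1 to slot 2, and so on. This offset is
+// why the edge slice is sized maxTokenType+1+1 rather than maxTokenType+1.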
+
+// addDFAState adds state D to the [DFA] if it is not already present, and returns
+// the actual instance stored in the [DFA]. If a state equivalent to D
+// is already in the [DFA], the existing state is returned. Otherwise, this
+// method returns D after adding it to the [DFA].
+//
+// If D is [ATNSimulatorError], this method returns [ATNSimulatorError] and
+// does not change the DFA.
+//
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) addDFAState(dfa *DFA, d *DFAState) *DFAState {
+ if d == ATNSimulatorError {
+ return d
+ }
+
+ existing, present := dfa.Get(d)
+ if present {
+ if runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Print("addDFAState " + d.String() + " exists")
+ }
+ return existing
+ }
+
+ // The state will be added if not already there or we will be given back the existing state struct
+ // if it is present.
+ //
+ d.stateNumber = dfa.Len()
+ if !d.configs.readOnly {
+ d.configs.OptimizeConfigs(&p.BaseATNSimulator)
+ d.configs.readOnly = true
+ d.configs.configLookup = nil
+ }
+ dfa.Put(d)
+
+ if runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("addDFAState new " + d.String())
+ }
+
+ return d
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) ReportAttemptingFullContext(dfa *DFA, conflictingAlts *BitSet, configs *ATNConfigSet, startIndex, stopIndex int) {
+ if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorRetryDebug {
+ interval := NewInterval(startIndex, stopIndex+1)
+ fmt.Println("ReportAttemptingFullContext decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() +
+ ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval))
+ }
+ if p.parser != nil {
+ p.parser.GetErrorListenerDispatch().ReportAttemptingFullContext(p.parser, dfa, startIndex, stopIndex, conflictingAlts, configs)
+ }
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) ReportContextSensitivity(dfa *DFA, prediction int, configs *ATNConfigSet, startIndex, stopIndex int) {
+ if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorRetryDebug {
+ interval := NewInterval(startIndex, stopIndex+1)
+ fmt.Println("ReportContextSensitivity decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() +
+ ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval))
+ }
+ if p.parser != nil {
+ p.parser.GetErrorListenerDispatch().ReportContextSensitivity(p.parser, dfa, startIndex, stopIndex, prediction, configs)
+ }
+}
+
+// ReportAmbiguity reports an ambiguity in the parse, which shows that the parser will explore a different route.
+//
+// If context-sensitive parsing, we know it's an ambiguity not a conflict or error, but we can report it to the developer
+// so that they can see that this is happening and can take action if they want to.
+//
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) ReportAmbiguity(dfa *DFA, _ *DFAState, startIndex, stopIndex int,
+ exact bool, ambigAlts *BitSet, configs *ATNConfigSet) {
+ if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorRetryDebug {
+ interval := NewInterval(startIndex, stopIndex+1)
+ fmt.Println("ReportAmbiguity " + ambigAlts.String() + ":" + configs.String() +
+ ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval))
+ }
+ if p.parser != nil {
+ p.parser.GetErrorListenerDispatch().ReportAmbiguity(p.parser, dfa, startIndex, stopIndex, exact, ambigAlts, configs)
+ }
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/parser_rule_context.go b/vendor/github.com/antlr4-go/antlr/v4/parser_rule_context.go
new file mode 100644
index 000000000..c249bc138
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/parser_rule_context.go
@@ -0,0 +1,421 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "reflect"
+ "strconv"
+)
+
+type ParserRuleContext interface {
+ RuleContext
+
+ SetException(RecognitionException)
+
+ AddTokenNode(token Token) *TerminalNodeImpl
+ AddErrorNode(badToken Token) *ErrorNodeImpl
+
+ EnterRule(listener ParseTreeListener)
+ ExitRule(listener ParseTreeListener)
+
+ SetStart(Token)
+ GetStart() Token
+
+ SetStop(Token)
+ GetStop() Token
+
+ AddChild(child RuleContext) RuleContext
+ RemoveLastChild()
+}
+
+type BaseParserRuleContext struct {
+ parentCtx RuleContext
+ invokingState int
+ RuleIndex int
+
+ start, stop Token
+ exception RecognitionException
+ children []Tree
+}
+
+func NewBaseParserRuleContext(parent ParserRuleContext, invokingStateNumber int) *BaseParserRuleContext {
+ prc := new(BaseParserRuleContext)
+ InitBaseParserRuleContext(prc, parent, invokingStateNumber)
+ return prc
+}
+
+func InitBaseParserRuleContext(prc *BaseParserRuleContext, parent ParserRuleContext, invokingStateNumber int) {
+	// What context invoked this rule?
+ prc.parentCtx = parent
+
+	// What state invoked the rule associated with this context?
+	// The "return address" is the followState of invokingState.
+	// If parent is nil, this should be -1.
+ if parent == nil {
+ prc.invokingState = -1
+ } else {
+ prc.invokingState = invokingStateNumber
+ }
+
+ prc.RuleIndex = -1
+	// If we are debugging or building a parse tree for a Visitor,
+	// we need to track all of the tokens and rule invocations associated
+	// with this rule's context. This is empty for parsing without tree
+	// construction because we don't need to track the details about
+	// how we parse this rule.
+ prc.children = nil
+ prc.start = nil
+ prc.stop = nil
+	// The exception that forced this rule to return. If the rule successfully
+	// completed, this is nil.
+ prc.exception = nil
+}
+
+func (prc *BaseParserRuleContext) SetException(e RecognitionException) {
+ prc.exception = e
+}
+
+func (prc *BaseParserRuleContext) GetChildren() []Tree {
+ return prc.children
+}
+
+func (prc *BaseParserRuleContext) CopyFrom(ctx *BaseParserRuleContext) {
+ // from RuleContext
+ prc.parentCtx = ctx.parentCtx
+ prc.invokingState = ctx.invokingState
+ prc.children = nil
+ prc.start = ctx.start
+ prc.stop = ctx.stop
+}
+
+func (prc *BaseParserRuleContext) GetText() string {
+ if prc.GetChildCount() == 0 {
+ return ""
+ }
+
+ var s string
+ for _, child := range prc.children {
+ s += child.(ParseTree).GetText()
+ }
+
+ return s
+}
+
+// EnterRule is called when any rule is entered.
+func (prc *BaseParserRuleContext) EnterRule(_ ParseTreeListener) {
+}
+
+// ExitRule is called when any rule is exited.
+func (prc *BaseParserRuleContext) ExitRule(_ ParseTreeListener) {
+}
+
+// Does not set the parent link; other add methods do that.
+func (prc *BaseParserRuleContext) addTerminalNodeChild(child TerminalNode) TerminalNode {
+ if prc.children == nil {
+ prc.children = make([]Tree, 0)
+ }
+ if child == nil {
+ panic("Child may not be null")
+ }
+ prc.children = append(prc.children, child)
+ return child
+}
+
+func (prc *BaseParserRuleContext) AddChild(child RuleContext) RuleContext {
+ if prc.children == nil {
+ prc.children = make([]Tree, 0)
+ }
+ if child == nil {
+ panic("Child may not be null")
+ }
+ prc.children = append(prc.children, child)
+ return child
+}
+
+// RemoveLastChild is used by [EnterOuterAlt] to toss out a [RuleContext] previously added as
+// we entered a rule. If we have a label, we will need to remove
+// the generic ruleContext object.
+func (prc *BaseParserRuleContext) RemoveLastChild() {
+ if prc.children != nil && len(prc.children) > 0 {
+ prc.children = prc.children[0 : len(prc.children)-1]
+ }
+}
+
+func (prc *BaseParserRuleContext) AddTokenNode(token Token) *TerminalNodeImpl {
+
+ node := NewTerminalNodeImpl(token)
+ prc.addTerminalNodeChild(node)
+ node.parentCtx = prc
+ return node
+
+}
+
+func (prc *BaseParserRuleContext) AddErrorNode(badToken Token) *ErrorNodeImpl {
+ node := NewErrorNodeImpl(badToken)
+ prc.addTerminalNodeChild(node)
+ node.parentCtx = prc
+ return node
+}
+
+func (prc *BaseParserRuleContext) GetChild(i int) Tree {
+	if prc.children != nil && i >= 0 && len(prc.children) > i {
+ return prc.children[i]
+ }
+
+ return nil
+}
+
+func (prc *BaseParserRuleContext) GetChildOfType(i int, childType reflect.Type) RuleContext {
+ if childType == nil {
+ return prc.GetChild(i).(RuleContext)
+ }
+
+ for j := 0; j < len(prc.children); j++ {
+ child := prc.children[j]
+ if reflect.TypeOf(child) == childType {
+ if i == 0 {
+ return child.(RuleContext)
+ }
+
+ i--
+ }
+ }
+
+ return nil
+}
+
+func (prc *BaseParserRuleContext) ToStringTree(ruleNames []string, recog Recognizer) string {
+ return TreesStringTree(prc, ruleNames, recog)
+}
+
+func (prc *BaseParserRuleContext) GetRuleContext() RuleContext {
+ return prc
+}
+
+func (prc *BaseParserRuleContext) Accept(visitor ParseTreeVisitor) interface{} {
+ return visitor.VisitChildren(prc)
+}
+
+func (prc *BaseParserRuleContext) SetStart(t Token) {
+ prc.start = t
+}
+
+func (prc *BaseParserRuleContext) GetStart() Token {
+ return prc.start
+}
+
+func (prc *BaseParserRuleContext) SetStop(t Token) {
+ prc.stop = t
+}
+
+func (prc *BaseParserRuleContext) GetStop() Token {
+ return prc.stop
+}
+
+func (prc *BaseParserRuleContext) GetToken(ttype int, i int) TerminalNode {
+
+ for j := 0; j < len(prc.children); j++ {
+ child := prc.children[j]
+ if c2, ok := child.(TerminalNode); ok {
+ if c2.GetSymbol().GetTokenType() == ttype {
+ if i == 0 {
+ return c2
+ }
+
+ i--
+ }
+ }
+ }
+ return nil
+}
+
+func (prc *BaseParserRuleContext) GetTokens(ttype int) []TerminalNode {
+ if prc.children == nil {
+ return make([]TerminalNode, 0)
+ }
+
+ tokens := make([]TerminalNode, 0)
+
+ for j := 0; j < len(prc.children); j++ {
+ child := prc.children[j]
+ if tchild, ok := child.(TerminalNode); ok {
+ if tchild.GetSymbol().GetTokenType() == ttype {
+ tokens = append(tokens, tchild)
+ }
+ }
+ }
+
+ return tokens
+}
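+
+// A sketch of assumed generated-code usage (SomeContext and ParserID are
+// hypothetical names): generated accessors typically delegate to the helpers
+// above, e.g.
+//
+//	func (s *SomeContext) AllID() []antlr.TerminalNode { return s.GetTokens(ParserID) }
+//	func (s *SomeContext) ID(i int) antlr.TerminalNode { return s.GetToken(ParserID, i) }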
+
+func (prc *BaseParserRuleContext) GetPayload() interface{} {
+ return prc
+}
+
+func (prc *BaseParserRuleContext) getChild(ctxType reflect.Type, i int) RuleContext {
+ if prc.children == nil || i < 0 || i >= len(prc.children) {
+ return nil
+ }
+
+ j := -1 // what element have we found with ctxType?
+ for _, o := range prc.children {
+
+ childType := reflect.TypeOf(o)
+
+ if childType.Implements(ctxType) {
+ j++
+ if j == i {
+ return o.(RuleContext)
+ }
+ }
+ }
+ return nil
+}
+
+// Go lacks generics, so it's not possible for us to return the child with the correct type, but we do
+// check for convertibility
+
+func (prc *BaseParserRuleContext) GetTypedRuleContext(ctxType reflect.Type, i int) RuleContext {
+ return prc.getChild(ctxType, i)
+}
+
+func (prc *BaseParserRuleContext) GetTypedRuleContexts(ctxType reflect.Type) []RuleContext {
+ if prc.children == nil {
+ return make([]RuleContext, 0)
+ }
+
+ contexts := make([]RuleContext, 0)
+
+ for _, child := range prc.children {
+ childType := reflect.TypeOf(child)
+
+ if childType.ConvertibleTo(ctxType) {
+ contexts = append(contexts, child.(RuleContext))
+ }
+ }
+ return contexts
+}
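+
+// A sketch of one way this helper has been called from generated code
+// (IExprContext is a hypothetical rule-context interface):
+//
+//	typ := reflect.TypeOf((*IExprContext)(nil)).Elem()
+//	exprs := ctx.GetTypedRuleContexts(typ) // every child convertible to IExprContext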
+
+func (prc *BaseParserRuleContext) GetChildCount() int {
+ if prc.children == nil {
+ return 0
+ }
+
+ return len(prc.children)
+}
+
+func (prc *BaseParserRuleContext) GetSourceInterval() Interval {
+ if prc.start == nil || prc.stop == nil {
+ return TreeInvalidInterval
+ }
+
+ return NewInterval(prc.start.GetTokenIndex(), prc.stop.GetTokenIndex())
+}
+
+// need to manage circular dependencies, so export now
+
+// String prints out a whole tree, not just a node, in LISP format
+// (root child1 .. childN). It prints just a node if this is a leaf.
+
+func (prc *BaseParserRuleContext) String(ruleNames []string, stop RuleContext) string {
+
+ var p ParserRuleContext = prc
+ s := "["
+ for p != nil && p != stop {
+ if ruleNames == nil {
+ if !p.IsEmpty() {
+ s += strconv.Itoa(p.GetInvokingState())
+ }
+ } else {
+ ri := p.GetRuleIndex()
+ var ruleName string
+ if ri >= 0 && ri < len(ruleNames) {
+ ruleName = ruleNames[ri]
+ } else {
+ ruleName = strconv.Itoa(ri)
+ }
+ s += ruleName
+ }
+ if p.GetParent() != nil && (ruleNames != nil || !p.GetParent().(ParserRuleContext).IsEmpty()) {
+ s += " "
+ }
+ pi := p.GetParent()
+ if pi != nil {
+ p = pi.(ParserRuleContext)
+ } else {
+ p = nil
+ }
+ }
+ s += "]"
+ return s
+}
+
+func (prc *BaseParserRuleContext) SetParent(v Tree) {
+ if v == nil {
+ prc.parentCtx = nil
+ } else {
+ prc.parentCtx = v.(RuleContext)
+ }
+}
+
+func (prc *BaseParserRuleContext) GetInvokingState() int {
+ return prc.invokingState
+}
+
+func (prc *BaseParserRuleContext) SetInvokingState(t int) {
+ prc.invokingState = t
+}
+
+func (prc *BaseParserRuleContext) GetRuleIndex() int {
+ return prc.RuleIndex
+}
+
+func (prc *BaseParserRuleContext) GetAltNumber() int {
+ return ATNInvalidAltNumber
+}
+
+func (prc *BaseParserRuleContext) SetAltNumber(_ int) {}
+
+// IsEmpty returns true if the context is empty.
+//
+// A context is empty if there is no invoking state, meaning nobody called the
+// current context.
+func (prc *BaseParserRuleContext) IsEmpty() bool {
+ return prc.invokingState == -1
+}
+
+// GetParent returns the parent context of this rule context, or nil if this
+// context is the root of the parse tree and has no parent.
+func (prc *BaseParserRuleContext) GetParent() Tree {
+ return prc.parentCtx
+}
+
+var ParserRuleContextEmpty = NewBaseParserRuleContext(nil, -1)
+
+type InterpreterRuleContext interface {
+ ParserRuleContext
+}
+
+type BaseInterpreterRuleContext struct {
+ *BaseParserRuleContext
+}
+
+//goland:noinspection GoUnusedExportedFunction
+func NewBaseInterpreterRuleContext(parent BaseInterpreterRuleContext, invokingStateNumber, ruleIndex int) *BaseInterpreterRuleContext {
+
+ prc := new(BaseInterpreterRuleContext)
+
+ prc.BaseParserRuleContext = NewBaseParserRuleContext(parent, invokingStateNumber)
+
+ prc.RuleIndex = ruleIndex
+
+ return prc
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/prediction_context.go b/vendor/github.com/antlr4-go/antlr/v4/prediction_context.go
new file mode 100644
index 000000000..c1b80cc1f
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/prediction_context.go
@@ -0,0 +1,727 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "golang.org/x/exp/slices"
+ "strconv"
+)
+
+var _emptyPredictionContextHash int
+
+func init() {
+ _emptyPredictionContextHash = murmurInit(1)
+ _emptyPredictionContextHash = murmurFinish(_emptyPredictionContextHash, 0)
+}
+
+func calculateEmptyHash() int {
+ return _emptyPredictionContextHash
+}
+
+const (
+	// BasePredictionContextEmptyReturnState represents $ in an array in full context mode; $
+	// doesn't mean wildcard:
+ //
+ // $ + x = [$,x]
+ //
+ // Here,
+ //
+ // $ = EmptyReturnState
+ BasePredictionContextEmptyReturnState = 0x7FFFFFFF
+)
+
+// TODO: JI These are meant to be atomics - this does not seem to match the Java runtime here
+//
+//goland:noinspection GoUnusedGlobalVariable
+var (
+ BasePredictionContextglobalNodeCount = 1
+ BasePredictionContextid = BasePredictionContextglobalNodeCount
+)
+
+const (
+ PredictionContextEmpty = iota
+ PredictionContextSingleton
+ PredictionContextArray
+)
+
+// PredictionContext is a go idiomatic implementation of PredictionContext that does not try to
+// emulate inheritance from Java, and can be used without an interface definition. An interface
+// is not required because no user code will ever need to implement this interface.
+type PredictionContext struct {
+ cachedHash int
+ pcType int
+ parentCtx *PredictionContext
+ returnState int
+ parents []*PredictionContext
+ returnStates []int
+}
+
+func NewEmptyPredictionContext() *PredictionContext {
+ nep := &PredictionContext{}
+ nep.cachedHash = calculateEmptyHash()
+ nep.pcType = PredictionContextEmpty
+ nep.returnState = BasePredictionContextEmptyReturnState
+ return nep
+}
+
+func NewBaseSingletonPredictionContext(parent *PredictionContext, returnState int) *PredictionContext {
+ pc := &PredictionContext{}
+ pc.pcType = PredictionContextSingleton
+ pc.returnState = returnState
+ pc.parentCtx = parent
+ if parent != nil {
+ pc.cachedHash = calculateHash(parent, returnState)
+ } else {
+ pc.cachedHash = calculateEmptyHash()
+ }
+ return pc
+}
+
+func SingletonBasePredictionContextCreate(parent *PredictionContext, returnState int) *PredictionContext {
+ if returnState == BasePredictionContextEmptyReturnState && parent == nil {
+ // someone can pass in the bits of an array ctx that mean $
+ return BasePredictionContextEMPTY
+ }
+ return NewBaseSingletonPredictionContext(parent, returnState)
+}
+
+func NewArrayPredictionContext(parents []*PredictionContext, returnStates []int) *PredictionContext {
+	// Parent can be nil only if full ctx mode and we make an array
+	// from EMPTY and non-empty. We merge EMPTY by using a nil parent and
+	// returnState == BasePredictionContextEmptyReturnState.
+ hash := murmurInit(1)
+ for _, parent := range parents {
+ hash = murmurUpdate(hash, parent.Hash())
+ }
+ for _, returnState := range returnStates {
+ hash = murmurUpdate(hash, returnState)
+ }
+ hash = murmurFinish(hash, len(parents)<<1)
+
+ nec := &PredictionContext{}
+ nec.cachedHash = hash
+ nec.pcType = PredictionContextArray
+ nec.parents = parents
+ nec.returnStates = returnStates
+ return nec
+}
+
+func (p *PredictionContext) Hash() int {
+ return p.cachedHash
+}
+
+func (p *PredictionContext) Equals(other Collectable[*PredictionContext]) bool {
+ switch p.pcType {
+	case PredictionContextEmpty:
+		// Use the two-value assertion so a nil or foreign Collectable
+		// compares as empty instead of panicking.
+		otherP, ok := other.(*PredictionContext)
+		return !ok || otherP == nil || otherP.isEmpty()
+ case PredictionContextSingleton:
+ return p.SingletonEquals(other)
+ case PredictionContextArray:
+ return p.ArrayEquals(other)
+ }
+ return false
+}
+
+func (p *PredictionContext) ArrayEquals(o Collectable[*PredictionContext]) bool {
+ if o == nil {
+ return false
+ }
+ other := o.(*PredictionContext)
+ if other == nil || other.pcType != PredictionContextArray {
+ return false
+ }
+ if p.cachedHash != other.Hash() {
+ return false // can't be same if hash is different
+ }
+
+ // Must compare the actual array elements and not just the array address
+ //
+ return slices.Equal(p.returnStates, other.returnStates) &&
+ slices.EqualFunc(p.parents, other.parents, func(x, y *PredictionContext) bool {
+ return x.Equals(y)
+ })
+}
+
+func (p *PredictionContext) SingletonEquals(other Collectable[*PredictionContext]) bool {
+ if other == nil {
+ return false
+ }
+ otherP := other.(*PredictionContext)
+ if otherP == nil {
+ return false
+ }
+
+ if p.cachedHash != otherP.Hash() {
+ return false // Can't be same if hash is different
+ }
+
+ if p.returnState != otherP.getReturnState(0) {
+ return false
+ }
+
+ // Both parents must be nil if one is
+ if p.parentCtx == nil {
+ return otherP.parentCtx == nil
+ }
+
+ return p.parentCtx.Equals(otherP.parentCtx)
+}
+
+func (p *PredictionContext) GetParent(i int) *PredictionContext {
+ switch p.pcType {
+ case PredictionContextEmpty:
+ return nil
+ case PredictionContextSingleton:
+ return p.parentCtx
+ case PredictionContextArray:
+ return p.parents[i]
+ }
+ return nil
+}
+
+func (p *PredictionContext) getReturnState(i int) int {
+ switch p.pcType {
+ case PredictionContextArray:
+ return p.returnStates[i]
+ default:
+ return p.returnState
+ }
+}
+
+func (p *PredictionContext) GetReturnStates() []int {
+ switch p.pcType {
+ case PredictionContextArray:
+ return p.returnStates
+ default:
+ return []int{p.returnState}
+ }
+}
+
+func (p *PredictionContext) length() int {
+ switch p.pcType {
+ case PredictionContextArray:
+ return len(p.returnStates)
+ default:
+ return 1
+ }
+}
+
+func (p *PredictionContext) hasEmptyPath() bool {
+ switch p.pcType {
+ case PredictionContextSingleton:
+ return p.returnState == BasePredictionContextEmptyReturnState
+ }
+ return p.getReturnState(p.length()-1) == BasePredictionContextEmptyReturnState
+}
+
+func (p *PredictionContext) String() string {
+ switch p.pcType {
+ case PredictionContextEmpty:
+ return "$"
+ case PredictionContextSingleton:
+ var up string
+
+ if p.parentCtx == nil {
+ up = ""
+ } else {
+ up = p.parentCtx.String()
+ }
+
+ if len(up) == 0 {
+ if p.returnState == BasePredictionContextEmptyReturnState {
+ return "$"
+ }
+
+ return strconv.Itoa(p.returnState)
+ }
+
+ return strconv.Itoa(p.returnState) + " " + up
+ case PredictionContextArray:
+ if p.isEmpty() {
+ return "[]"
+ }
+
+ s := "["
+ for i := 0; i < len(p.returnStates); i++ {
+ if i > 0 {
+ s = s + ", "
+ }
+ if p.returnStates[i] == BasePredictionContextEmptyReturnState {
+ s = s + "$"
+ continue
+ }
+ s = s + strconv.Itoa(p.returnStates[i])
+ if !p.parents[i].isEmpty() {
+ s = s + " " + p.parents[i].String()
+ } else {
+ s = s + "nil"
+ }
+ }
+ return s + "]"
+
+ default:
+ return "unknown"
+ }
+}
+
+func (p *PredictionContext) isEmpty() bool {
+ switch p.pcType {
+ case PredictionContextEmpty:
+ return true
+ case PredictionContextArray:
+ // since EmptyReturnState can only appear in the last position, we
+ // don't need to verify that size==1
+ return p.returnStates[0] == BasePredictionContextEmptyReturnState
+ default:
+ return false
+ }
+}
+
+func (p *PredictionContext) Type() int {
+ return p.pcType
+}
+
+func calculateHash(parent *PredictionContext, returnState int) int {
+ h := murmurInit(1)
+ h = murmurUpdate(h, parent.Hash())
+ h = murmurUpdate(h, returnState)
+ return murmurFinish(h, 2)
+}
+
+// predictionContextFromRuleContext converts a [RuleContext] tree to a
+// [PredictionContext] graph. Returns [BasePredictionContextEMPTY] if
+// outerContext is empty or nil.
+func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) *PredictionContext {
+ if outerContext == nil {
+ outerContext = ParserRuleContextEmpty
+ }
+ // if we are in RuleContext of start rule, s, then BasePredictionContext
+ // is EMPTY. Nobody called us. (if we are empty, return empty)
+ if outerContext.GetParent() == nil || outerContext == ParserRuleContextEmpty {
+ return BasePredictionContextEMPTY
+ }
+ // If we have a parent, convert it to a BasePredictionContext graph
+ parent := predictionContextFromRuleContext(a, outerContext.GetParent().(RuleContext))
+ state := a.states[outerContext.GetInvokingState()]
+ transition := state.GetTransitions()[0]
+
+ return SingletonBasePredictionContextCreate(parent, transition.(*RuleTransition).followState.GetStateNumber())
+}
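+
+// A sketch of the recursion above: for an invocation chain prog -> statement
+// (names from the grammar example earlier in this file), the graph built here
+// is a singleton chain of followState numbers, one per invoking
+// RuleTransition, terminating in BasePredictionContextEMPTY at the start rule.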
+
+func merge(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMap) *PredictionContext {
+
+ // Share same graph if both same
+ //
+ if a == b || a.Equals(b) {
+ return a
+ }
+
+ if a.pcType == PredictionContextSingleton && b.pcType == PredictionContextSingleton {
+ return mergeSingletons(a, b, rootIsWildcard, mergeCache)
+ }
+ // At least one of a or b is array
+ // If one is $ and rootIsWildcard, return $ as wildcard
+ if rootIsWildcard {
+ if a.isEmpty() {
+ return a
+ }
+ if b.isEmpty() {
+ return b
+ }
+ }
+
+ // Convert either Singleton or Empty to arrays, so that we can merge them
+ //
+ ara := convertToArray(a)
+ arb := convertToArray(b)
+ return mergeArrays(ara, arb, rootIsWildcard, mergeCache)
+}
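+
+// A sketch of the rules above: merge returns its left operand unchanged when
+// the graphs are equal, and with rootIsWildcard=true an empty ($) operand
+// absorbs the other, e.g. merge($, bx, true, nil) == $ per the wildcard cases
+// handled in mergeRoot below.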
+
+func convertToArray(pc *PredictionContext) *PredictionContext {
+ switch pc.Type() {
+ case PredictionContextEmpty:
+ return NewArrayPredictionContext([]*PredictionContext{}, []int{})
+ case PredictionContextSingleton:
+ return NewArrayPredictionContext([]*PredictionContext{pc.GetParent(0)}, []int{pc.getReturnState(0)})
+ default:
+ // Already an array
+ }
+ return pc
+}
+
+// mergeSingletons merges two Singleton [PredictionContext] instances.
+//
+// Stack tops equal, parents merge is same: return the left graph.
+//
+// Same stack top, parents differ: merge parents giving an array node, then
+// remainders of those graphs. A new root node is created to point to the
+// merged parents.
+//
+// Different stack tops pointing to same parent: make an array node for the
+// root where both elements in the root point to the same (original)
+// parent.
+//
+// Different stack tops pointing to different parents: make an array node for
+// the root where each element points to the corresponding original
+// parent.
+//
+// a and b are the two singleton [PredictionContext] instances to merge;
+// rootIsWildcard is true for a local-context merge and false for a
+// full-context merge; mergeCache caches previously merged pairs.
+func mergeSingletons(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMap) *PredictionContext {
+ if mergeCache != nil {
+ previous, present := mergeCache.Get(a, b)
+ if present {
+ return previous
+ }
+ previous, present = mergeCache.Get(b, a)
+ if present {
+ return previous
+ }
+ }
+
+ rootMerge := mergeRoot(a, b, rootIsWildcard)
+ if rootMerge != nil {
+ if mergeCache != nil {
+ mergeCache.Put(a, b, rootMerge)
+ }
+ return rootMerge
+ }
+ if a.returnState == b.returnState {
+ parent := merge(a.parentCtx, b.parentCtx, rootIsWildcard, mergeCache)
+ // if parent is same as existing a or b parent or reduced to a parent,
+ // return it
+ if parent.Equals(a.parentCtx) {
+ return a // ax + bx = ax, if a=b
+ }
+ if parent.Equals(b.parentCtx) {
+ return b // ax + bx = bx, if a=b
+ }
+ // else: ax + ay = a'[x,y]
+ // merge parents x and y, giving array node with x,y then remainders
+ // of those graphs. dup a, a' points at merged array.
+ // New joined parent so create a new singleton pointing to it, a'
+ spc := SingletonBasePredictionContextCreate(parent, a.returnState)
+ if mergeCache != nil {
+ mergeCache.Put(a, b, spc)
+ }
+ return spc
+ }
+ // a != b payloads differ
+ // see if we can collapse parents due to $+x parents if local ctx
+ var singleParent *PredictionContext
+	if a.Equals(b) || (a.parentCtx != nil && a.parentCtx.Equals(b.parentCtx)) {
+		// ax + bx = [a,b]x
+		singleParent = a.parentCtx
+	}
+ if singleParent != nil { // parents are same
+ // sort payloads and use same parent
+ payloads := []int{a.returnState, b.returnState}
+ if a.returnState > b.returnState {
+ payloads[0] = b.returnState
+ payloads[1] = a.returnState
+ }
+ parents := []*PredictionContext{singleParent, singleParent}
+ apc := NewArrayPredictionContext(parents, payloads)
+ if mergeCache != nil {
+ mergeCache.Put(a, b, apc)
+ }
+ return apc
+ }
+ // parents differ and can't merge them. Just pack together
+ // into array can't merge.
+ // ax + by = [ax,by]
+ payloads := []int{a.returnState, b.returnState}
+ parents := []*PredictionContext{a.parentCtx, b.parentCtx}
+ if a.returnState > b.returnState { // sort by payload
+ payloads[0] = b.returnState
+ payloads[1] = a.returnState
+ parents = []*PredictionContext{b.parentCtx, a.parentCtx}
+ }
+ apc := NewArrayPredictionContext(parents, payloads)
+ if mergeCache != nil {
+ mergeCache.Put(a, b, apc)
+ }
+ return apc
+}
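+
+// A minimal sketch (illustrative names, not upstream code) of the ax + ay
+// case handled above: two singletons share return state x but have
+// different parents, so the parents are merged and a new singleton a' is
+// created around the merged parent:
+//
+//	// a = (parentA, x), b = (parentB, x)
+//	merged := mergeSingletons(a, b, true, cache)
+//	// merged.returnState == x and merged's parent is
+//	// merge(parentA, parentB, true, cache)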
+
+// mergeRoot handles the case where at least one of a or b is EMPTY. In the
+// following rules, the symbol $ is used to represent EMPTY.
+//
+// # Local-Context Merges
+//
+// These local-context merge operations are used when rootIsWildcard is true:
+//
+//   - $ is a superset of any graph: return $.
+//   - $ and anything is $, so the merged parent is $: return left graph.
+//   - Special case of last merge if local context.
+//
+// # Full-Context Merges
+//
+// These full-context merge operations are used when rootIsWildcard is false:
+//
+//   - Must keep all contexts; $ in the array is a special value (with a
+//     nil parent).
+//
+// @param a the first [SingletonBasePredictionContext]
+// @param b the second [SingletonBasePredictionContext]
+// @param rootIsWildcard true if this is a local-context merge,
+// otherwise false to indicate a full-context merge
+func mergeRoot(a, b *PredictionContext, rootIsWildcard bool) *PredictionContext {
+ if rootIsWildcard {
+ if a.pcType == PredictionContextEmpty {
+			return BasePredictionContextEMPTY // $ + b = $
+ }
+ if b.pcType == PredictionContextEmpty {
+			return BasePredictionContextEMPTY // a + $ = $
+ }
+ } else {
+ if a.isEmpty() && b.isEmpty() {
+ return BasePredictionContextEMPTY // $ + $ = $
+		} else if a.isEmpty() { // $ + x = [x,$]
+ payloads := []int{b.getReturnState(-1), BasePredictionContextEmptyReturnState}
+ parents := []*PredictionContext{b.GetParent(-1), nil}
+ return NewArrayPredictionContext(parents, payloads)
+		} else if b.isEmpty() { // x + $ = [x,$] ($ is always last if present)
+ payloads := []int{a.getReturnState(-1), BasePredictionContextEmptyReturnState}
+ parents := []*PredictionContext{a.GetParent(-1), nil}
+ return NewArrayPredictionContext(parents, payloads)
+ }
+ }
+ return nil
+}
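+
+// Illustrative sketch (assumed names) of the full-context x + $ rule above:
+// when b is empty and rootIsWildcard is false, the result is a two-element
+// array that keeps $ as a special nil-parent entry, placed last:
+//
+//	r := mergeRoot(x, empty, false)
+//	// r.parents == []*PredictionContext{x.GetParent(-1), nil}
+//	// r.returnStates == []int{x.getReturnState(-1), BasePredictionContextEmptyReturnState}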
+
+// mergeArrays merges two [ArrayBasePredictionContext] instances, covering
+// the following cases:
+//
+//   - Different tops, different parents.
+//   - Shared top, same parents.
+//   - Shared top, different parents.
+//   - Shared top, all shared parents.
+//   - Equal tops, merge parents and reduce top to a
+//     [SingletonBasePredictionContext].
+//
+//goland:noinspection GoBoolExpressions
+func mergeArrays(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMap) *PredictionContext {
+ if mergeCache != nil {
+ previous, present := mergeCache.Get(a, b)
+ if present {
+ if runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous")
+ }
+ return previous
+ }
+ previous, present = mergeCache.Get(b, a)
+ if present {
+ if runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous")
+ }
+ return previous
+ }
+ }
+ // merge sorted payloads a + b => M
+ i := 0 // walks a
+ j := 0 // walks b
+ k := 0 // walks target M array
+
+ mergedReturnStates := make([]int, len(a.returnStates)+len(b.returnStates))
+ mergedParents := make([]*PredictionContext, len(a.returnStates)+len(b.returnStates))
+ // walk and merge to yield mergedParents, mergedReturnStates
+ for i < len(a.returnStates) && j < len(b.returnStates) {
+ aParent := a.parents[i]
+ bParent := b.parents[j]
+ if a.returnStates[i] == b.returnStates[j] {
+ // same payload (stack tops are equal), must yield merged singleton
+ payload := a.returnStates[i]
+ // $+$ = $
+ bothDollars := payload == BasePredictionContextEmptyReturnState && aParent == nil && bParent == nil
+			axAX := aParent != nil && bParent != nil && aParent.Equals(bParent) // ax+ax -> ax
+ if bothDollars || axAX {
+ mergedParents[k] = aParent // choose left
+ mergedReturnStates[k] = payload
+ } else { // ax+ay -> a'[x,y]
+ mergedParent := merge(aParent, bParent, rootIsWildcard, mergeCache)
+ mergedParents[k] = mergedParent
+ mergedReturnStates[k] = payload
+ }
+ i++ // hop over left one as usual
+		j++ // but also skip one on the right side since we merged
+ } else if a.returnStates[i] < b.returnStates[j] { // copy a[i] to M
+ mergedParents[k] = aParent
+ mergedReturnStates[k] = a.returnStates[i]
+ i++
+ } else { // b > a, copy b[j] to M
+ mergedParents[k] = bParent
+ mergedReturnStates[k] = b.returnStates[j]
+ j++
+ }
+ k++
+ }
+ // copy over any payloads remaining in either array
+ if i < len(a.returnStates) {
+ for p := i; p < len(a.returnStates); p++ {
+ mergedParents[k] = a.parents[p]
+ mergedReturnStates[k] = a.returnStates[p]
+ k++
+ }
+ } else {
+ for p := j; p < len(b.returnStates); p++ {
+ mergedParents[k] = b.parents[p]
+ mergedReturnStates[k] = b.returnStates[p]
+ k++
+ }
+ }
+ // trim merged if we combined a few that had same stack tops
+ if k < len(mergedParents) { // write index < last position trim
+ if k == 1 { // for just one merged element, return singleton top
+ pc := SingletonBasePredictionContextCreate(mergedParents[0], mergedReturnStates[0])
+ if mergeCache != nil {
+ mergeCache.Put(a, b, pc)
+ }
+ return pc
+ }
+ mergedParents = mergedParents[0:k]
+ mergedReturnStates = mergedReturnStates[0:k]
+ }
+
+ M := NewArrayPredictionContext(mergedParents, mergedReturnStates)
+
+ // if we created same array as a or b, return that instead
+ // TODO: JI track whether this is possible above during merge sort for speed and possibly avoid an allocation
+ if M.Equals(a) {
+ if mergeCache != nil {
+ mergeCache.Put(a, b, a)
+ }
+ if runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> a")
+ }
+ return a
+ }
+ if M.Equals(b) {
+ if mergeCache != nil {
+ mergeCache.Put(a, b, b)
+ }
+ if runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> b")
+ }
+ return b
+ }
+ combineCommonParents(&mergedParents)
+
+ if mergeCache != nil {
+ mergeCache.Put(a, b, M)
+ }
+ if runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> " + M.String())
+ }
+ return M
+}
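+
+// Worked example (illustrative only): merging return-state lists [1, 3] and
+// [2, 3] walks both sorted arrays like a merge sort. 1 and 2 are copied
+// across, while the shared top 3 is collapsed by merging its two parents,
+// yielding returnStates [1, 2, 3] with k == 3, so the merged slices are
+// trimmed down from their initial length of 4.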
+
+// Make pass over all M parents and merge any Equals() ones.
+// Note that we pass a pointer to the slice as we want to modify it in place.
+//
+//goland:noinspection GoUnusedFunction
+func combineCommonParents(parents *[]*PredictionContext) {
+ uniqueParents := NewJStore[*PredictionContext, Comparator[*PredictionContext]](pContextEqInst, PredictionContextCollection, "combineCommonParents for PredictionContext")
+
+ for p := 0; p < len(*parents); p++ {
+ parent := (*parents)[p]
+ _, _ = uniqueParents.Put(parent)
+ }
+ for q := 0; q < len(*parents); q++ {
+ pc, _ := uniqueParents.Get((*parents)[q])
+ (*parents)[q] = pc
+ }
+}
+
+func getCachedBasePredictionContext(context *PredictionContext, contextCache *PredictionContextCache, visited *VisitRecord) *PredictionContext {
+ if context.isEmpty() {
+ return context
+ }
+ existing, present := visited.Get(context)
+ if present {
+ return existing
+ }
+
+ existing, present = contextCache.Get(context)
+ if present {
+ visited.Put(context, existing)
+ return existing
+ }
+ changed := false
+ parents := make([]*PredictionContext, context.length())
+ for i := 0; i < len(parents); i++ {
+ parent := getCachedBasePredictionContext(context.GetParent(i), contextCache, visited)
+ if changed || !parent.Equals(context.GetParent(i)) {
+ if !changed {
+ parents = make([]*PredictionContext, context.length())
+ for j := 0; j < context.length(); j++ {
+ parents[j] = context.GetParent(j)
+ }
+ changed = true
+ }
+ parents[i] = parent
+ }
+ }
+ if !changed {
+ contextCache.add(context)
+ visited.Put(context, context)
+ return context
+ }
+ var updated *PredictionContext
+ if len(parents) == 0 {
+ updated = BasePredictionContextEMPTY
+ } else if len(parents) == 1 {
+ updated = SingletonBasePredictionContextCreate(parents[0], context.getReturnState(0))
+ } else {
+ updated = NewArrayPredictionContext(parents, context.GetReturnStates())
+ }
+ contextCache.add(updated)
+ visited.Put(updated, updated)
+ visited.Put(context, updated)
+
+ return updated
+}
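+
+// Usage sketch (illustrative, assuming the runtime's VisitRecord helper):
+// canonicalize a context graph against a shared cache so that structurally
+// equal sub-graphs are stored only once:
+//
+//	cache := NewPredictionContextCache()
+//	visited := NewVisitRecord()
+//	canon := getCachedBasePredictionContext(ctx, cache, visited)
+//	// canon.Equals(ctx) holds; later calls with an equal graph return
+//	// the cached instance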
diff --git a/vendor/github.com/antlr4-go/antlr/v4/prediction_context_cache.go b/vendor/github.com/antlr4-go/antlr/v4/prediction_context_cache.go
new file mode 100644
index 000000000..25dfb11e8
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/prediction_context_cache.go
@@ -0,0 +1,48 @@
+package antlr
+
+var BasePredictionContextEMPTY = &PredictionContext{
+ cachedHash: calculateEmptyHash(),
+ pcType: PredictionContextEmpty,
+ returnState: BasePredictionContextEmptyReturnState,
+}
+
+// PredictionContextCache is used to cache [PredictionContext] objects. It is
+// used for the shared context cache associated with contexts in DFA states.
+// This cache can be used for both lexers and parsers.
+type PredictionContextCache struct {
+ cache *JMap[*PredictionContext, *PredictionContext, Comparator[*PredictionContext]]
+}
+
+func NewPredictionContextCache() *PredictionContextCache {
+ return &PredictionContextCache{
+ cache: NewJMap[*PredictionContext, *PredictionContext, Comparator[*PredictionContext]](pContextEqInst, PredictionContextCacheCollection, "NewPredictionContextCache()"),
+ }
+}
+
+// Add a context to the cache and return it. If the context already exists,
+// return that one instead and do not add a new context to the cache.
+// Protect shared cache from unsafe thread access.
+func (p *PredictionContextCache) add(ctx *PredictionContext) *PredictionContext {
+ if ctx.isEmpty() {
+ return BasePredictionContextEMPTY
+ }
+
+ // Put will return the existing entry if it is present (note this is done via Equals, not whether it is
+ // the same pointer), otherwise it will add the new entry and return that.
+ //
+ existing, present := p.cache.Get(ctx)
+ if present {
+ return existing
+ }
+ p.cache.Put(ctx, ctx)
+ return ctx
+}
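+
+// Package-internal example (illustrative, assuming ctxA and ctxB are
+// structurally equal but distinct pointers):
+//
+//	cache := NewPredictionContextCache()
+//	c1 := cache.add(ctxA)
+//	c2 := cache.add(ctxB)
+//	// c1 == c2: the first instance added becomes the canonical one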
+
+func (p *PredictionContextCache) Get(ctx *PredictionContext) (*PredictionContext, bool) {
+ pc, exists := p.cache.Get(ctx)
+ return pc, exists
+}
+
+func (p *PredictionContextCache) length() int {
+ return p.cache.Len()
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/prediction_mode.go b/vendor/github.com/antlr4-go/antlr/v4/prediction_mode.go
new file mode 100644
index 000000000..3f85a6a52
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/prediction_mode.go
@@ -0,0 +1,536 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+// This enumeration defines the prediction modes available in ANTLR 4 along with
+// utility methods for analyzing configuration sets for conflicts and/or
+// ambiguities.
+
+const (
+ // PredictionModeSLL represents the SLL(*) prediction mode.
+ // This prediction mode ignores the current
+ // parser context when making predictions. This is the fastest prediction
+ // mode, and provides correct results for many grammars. This prediction
+ // mode is more powerful than the prediction mode provided by ANTLR 3, but
+ // may result in syntax errors for grammar and input combinations which are
+ // not SLL.
+ //
+ // When using this prediction mode, the parser will either return a correct
+ // parse tree (i.e. the same parse tree that would be returned with the
+ // [PredictionModeLL] prediction mode), or it will Report a syntax error. If a
+ // syntax error is encountered when using the SLL prediction mode,
+ // it may be due to either an actual syntax error in the input or indicate
+ // that the particular combination of grammar and input requires the more
+ // powerful LL prediction abilities to complete successfully.
+ //
+ // This prediction mode does not provide any guarantees for prediction
+ // behavior for syntactically-incorrect inputs.
+ //
+ PredictionModeSLL = 0
+
+ // PredictionModeLL represents the LL(*) prediction mode.
+ // This prediction mode allows the current parser
+ // context to be used for resolving SLL conflicts that occur during
+ // prediction. This is the fastest prediction mode that guarantees correct
+ // parse results for all combinations of grammars with syntactically correct
+ // inputs.
+ //
+ // When using this prediction mode, the parser will make correct decisions
+ // for all syntactically-correct grammar and input combinations. However, in
+ // cases where the grammar is truly ambiguous this prediction mode might not
+ // report a precise answer for exactly which alternatives are
+ // ambiguous.
+ //
+ // This prediction mode does not provide any guarantees for prediction
+ // behavior for syntactically-incorrect inputs.
+ //
+ PredictionModeLL = 1
+
+ // PredictionModeLLExactAmbigDetection represents the LL(*) prediction mode
+ // with exact ambiguity detection.
+ //
+ // In addition to the correctness guarantees provided by the [PredictionModeLL] prediction mode,
+ // this prediction mode instructs the prediction algorithm to determine the
+ // complete and exact set of ambiguous alternatives for every ambiguous
+ // decision encountered while parsing.
+ //
+ // This prediction mode may be used for diagnosing ambiguities during
+ // grammar development. Due to the performance overhead of calculating sets
+ // of ambiguous alternatives, this prediction mode should be avoided when
+ // the exact results are not necessary.
+ //
+ // This prediction mode does not provide any guarantees for prediction
+ // behavior for syntactically-incorrect inputs.
+ //
+ PredictionModeLLExactAmbigDetection = 2
+)
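+
+// For example (sketch, assuming a generated parser p whose interpreter is a
+// [ParserATNSimulator]): a common pattern is to try the fast SLL mode first
+// and fall back to full LL only on a syntax error, since LL is guaranteed
+// correct but slower:
+//
+//	p.GetInterpreter().SetPredictionMode(PredictionModeSLL)
+//	// ... on syntax error, rewind the input and retry with:
+//	p.GetInterpreter().SetPredictionMode(PredictionModeLL)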
+
+// PredictionModehasSLLConflictTerminatingPrediction computes the SLL prediction termination condition.
+//
+// This method computes the SLL prediction termination condition for both of
+// the following cases:
+//
+// - The usual SLL+LL fallback upon SLL conflict
+// - Pure SLL without LL fallback
+//
+// # Combined SLL+LL Parsing
+//
+// When LL-fallback is enabled upon SLL conflict, correct predictions are
+// ensured regardless of how the termination condition is computed by this
+// method. Due to the substantially higher cost of LL prediction, the
+// prediction should only fall back to LL when the additional lookahead
+// cannot lead to a unique SLL prediction.
+//
+// Assuming combined SLL+LL parsing, an SLL configuration set with only
+// conflicting subsets should fall back to full LL, even if the
+// configuration sets don't resolve to the same alternative, e.g.
+//
+// {1,2} and {3,4}
+//
+// If there is at least one non-conflicting
+// configuration, SLL could continue with the hopes that more lookahead will
+// resolve via one of those non-conflicting configurations.
+//
+// Here's the prediction termination rule, then: SLL (for SLL+LL parsing)
+// stops when it sees only conflicting configuration subsets. In contrast,
+// full LL keeps going when there is uncertainty.
+//
+// # Heuristic
+//
+// As a heuristic, we stop prediction when we see any conflicting subset
+// unless we see a state that only has one alternative associated with it.
+// The single-alt-state thing lets prediction continue upon rules like
+// (otherwise, it would admit defeat too soon):
+//
+//	s : (ID | ID ID?) ;
+//
+// When the [ATN] simulation reaches the state before ';', it has a
+// [DFA] state that looks like:
+//
+// [12|1|[], 6|2|[], 12|2|[]]
+//
+// Naturally
+//
+// 12|1|[] and 12|2|[]
+//
+// conflict, but we cannot stop processing this node because alternative two has another way to continue,
+// via
+//
+// [6|2|[]]
+//
+// It also lets us continue with this rule:
+//
+// [1|1|[], 1|2|[], 8|3|[]] a : A | A | A B ;
+//
+// After Matching input A, we reach the stop state for rule A, state 1.
+// State 8 is the state immediately before B. Clearly alternatives 1 and 2
+// conflict and no amount of further lookahead will separate the two.
+// However, alternative 3 will be able to continue, and so we do not stop
+// working on this state. In the previous example, we're concerned with
+// states associated with the conflicting alternatives. Here alt 3 is not
+// associated with the conflicting configs, but since we can continue
+// looking for input reasonably, don't declare the state done.
+//
+// # Pure SLL Parsing
+//
+// To handle pure SLL parsing, all we have to do is make sure that we
+// combine stack contexts for configurations that differ only by semantic
+// predicate. From there, we can do the usual SLL termination heuristic.
+//
+// # Predicates in SLL+LL Parsing
+//
+// SLL decisions don't evaluate predicates until after they reach [DFA] stop
+// states because they need to create the [DFA] cache that works in all
+// semantic situations. In contrast, full LL evaluates predicates collected
+// during start state computation, so it can ignore predicates thereafter.
+// This means that SLL termination detection can totally ignore semantic
+// predicates.
+//
+// Implementation-wise, [ATNConfigSet] combines stack contexts but not
+// semantic predicate contexts, so we might see two configurations like the
+// following:
+//
+// (s, 1, x, {}), (s, 1, x', {p})
+//
+// Before testing these configurations against others, we have to merge
+// x and x' (without modifying the existing configurations).
+// For example, we test (x+x') == x'' when looking for conflicts in
+// the following configurations:
+//
+//	(s, 1, x, {}), (s, 1, x', {p}), (s, 2, x'', {})
+//
+// If the configuration set has predicates (as indicated by
+// [ATNConfigSet.hasSemanticContext]), this algorithm makes a copy of
+// the configurations to strip out all the predicates so that a standard
+// [ATNConfigSet] will merge everything ignoring predicates.
+func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs *ATNConfigSet) bool {
+
+ // Configs in rule stop states indicate reaching the end of the decision
+ // rule (local context) or end of start rule (full context). If all
+ // configs meet this condition, then none of the configurations is able
+ // to Match additional input, so we terminate prediction.
+ //
+ if PredictionModeallConfigsInRuleStopStates(configs) {
+ return true
+ }
+
+ // pure SLL mode parsing
+ if mode == PredictionModeSLL {
+ // Don't bother with combining configs from different semantic
+		// contexts if we can fail over to full LL; costs more time
+ // since we'll often fail over anyway.
+ if configs.hasSemanticContext {
+ // dup configs, tossing out semantic predicates
+ dup := NewATNConfigSet(false)
+ for _, c := range configs.configs {
+
+ // NewATNConfig({semanticContext:}, c)
+ c = NewATNConfig2(c, SemanticContextNone)
+ dup.Add(c, nil)
+ }
+ configs = dup
+ }
+ // now we have combined contexts for configs with dissimilar predicates
+ }
+ // pure SLL or combined SLL+LL mode parsing
+ altsets := PredictionModegetConflictingAltSubsets(configs)
+ return PredictionModehasConflictingAltSet(altsets) && !PredictionModehasStateAssociatedWithOneAlt(configs)
+}
+
+// PredictionModehasConfigInRuleStopState checks if any configuration in the given configs is in a
+// [RuleStopState]. Configurations meeting this condition have reached
+// the end of the decision rule (local context) or end of start rule (full
+// context).
+//
+// The func returns true if any configuration in the supplied configs is in a [RuleStopState]
+func PredictionModehasConfigInRuleStopState(configs *ATNConfigSet) bool {
+ for _, c := range configs.configs {
+ if _, ok := c.GetState().(*RuleStopState); ok {
+ return true
+ }
+ }
+ return false
+}
+
+// PredictionModeallConfigsInRuleStopStates checks if all configurations in configs are in a
+// [RuleStopState]. Configurations meeting this condition have reached
+// the end of the decision rule (local context) or end of start rule (full
+// context).
+//
+// The func returns true if all configurations in configs are in a
+// [RuleStopState]
+func PredictionModeallConfigsInRuleStopStates(configs *ATNConfigSet) bool {
+
+ for _, c := range configs.configs {
+ if _, ok := c.GetState().(*RuleStopState); !ok {
+ return false
+ }
+ }
+ return true
+}
+
+// PredictionModeresolvesToJustOneViableAlt checks full LL prediction termination.
+//
+// Can we stop looking ahead during [ATN] simulation or is there some
+// uncertainty as to which alternative we will ultimately pick, after
+// consuming more input? Even if there are partial conflicts, we might know
+// that everything is going to resolve to the same minimum alternative. That
+// means we can stop since no more lookahead will change that fact. On the
+// other hand, there might be multiple conflicts that resolve to different
+// minimums. That means we need more look ahead to decide which of those
+// alternatives we should predict.
+//
+// The basic idea is to split the set of configurations 'C', into
+// conflicting subsets (s, _, ctx, _) and singleton subsets with
+// non-conflicting configurations. Two configurations conflict if they have
+// identical [ATNConfig].state and [ATNConfig].context values
+// but a different [ATNConfig].alt value, e.g.
+//
+// (s, i, ctx, _)
+//
+// and
+//
+// (s, j, ctx, _) ; for i != j
+//
+// Reduce these configuration subsets to the set of possible alternatives.
+// You can compute the alternative subsets in one pass as follows:
+//
+// A_s,ctx = {i | (s, i, ctx, _)}
+//
+// for each configuration in C holding s and ctx fixed.
+//
+// Or in pseudo-code:
+//
+// for each configuration c in C:
+//	map[c] U= c.ATNConfig.alt // map hash/equals uses s and x, not alt and not pred
+//
+// The values in map are the set of
+//
+// A_s,ctx
+//
+// sets.
+//
+// If
+//
+// |A_s,ctx| = 1
+//
+// then there is no conflict associated with s and ctx.
+//
+// Reduce the subsets to singletons by choosing a minimum of each subset. If
+// the union of these alternative subsets is a singleton, then no amount of
+// further lookahead will help us. We will always pick that alternative. If,
+// however, there is more than one alternative, then we are uncertain which
+// alternative to predict and must continue looking for resolution. We may
+// or may not discover an ambiguity in the future, even if there are no
+// conflicting subsets this round.
+//
+// The biggest sin is to terminate early because it means we've made a
+// decision but were uncertain as to the eventual outcome. We haven't used
+// enough lookahead. On the other hand, announcing a conflict too late is no
+// big deal; you will still have the conflict. It's just inefficient. It
+// might even look until the end of file.
+//
+// No special consideration for semantic predicates is required because
+// predicates are evaluated on-the-fly for full LL prediction, ensuring that
+// no configuration contains a semantic context during the termination
+// check.
+//
+// # Conflicting Configs
+//
+// Two configurations:
+//
+// (s, i, x) and (s, j, x')
+//
+// conflict when i != j but x = x'. Because we merge all
+// (s, i, _) configurations together, that means that there are at
+// most n configurations associated with state s for
+// n possible alternatives in the decision. The merged stacks
+// complicate the comparison of configuration contexts x and x'.
+//
+// Sam checks to see if one is a subset of the other by calling
+// merge and checking to see if the merged result is either x or x'.
+// If the x associated with lowest alternative i
+// is the superset, then i is the only possible prediction since the
+// others resolve to min(i) as well. However, if x is
+// associated with j > i then at least one stack configuration for
+// j is not in conflict with alternative i. The algorithm
+// should keep going, looking for more lookahead due to the uncertainty.
+//
+// For simplicity, I'm doing an equality check between x and
+// x', which lets the algorithm continue to consume lookahead longer
+// than necessary. The reason I like the equality is of course the
+// simplicity but also because that is the test you need to detect the
+// alternatives that are actually in conflict.
+//
+// # Continue/Stop Rule
+//
+// Continue if the union of resolved alternative sets from non-conflicting and
+// conflicting alternative subsets has more than one alternative. We are
+// uncertain about which alternative to predict.
+//
+// The complete set of alternatives,
+//
+// [i for (_, i, _)]
+//
+// tells us which alternatives are still in the running for the amount of input we've
+// consumed at this point. The conflicting sets let us strip away
+// configurations that won't lead to more states because we resolve
+// conflicts to the configuration with a minimum alternate for the
+// conflicting set.
+//
+// # Cases
+//
+// - no conflicts and more than 1 alternative in set => continue
+// - (s, 1, x), (s, 2, x), (s, 3, z), (s', 1, y), (s', 2, y) yields non-conflicting set
+// {3} ∪ conflicting sets min({1,2}) ∪ min({1,2}) = {1,3} => continue
+//   - (s, 1, x), (s, 2, x), (s', 1, y), (s', 2, y), (s'', 1, z) yields non-conflicting set
+// {1} ∪ conflicting sets min({1,2}) ∪ min({1,2}) = {1} => stop and predict 1
+// - (s, 1, x), (s, 2, x), (s', 1, y), (s', 2, y) yields conflicting, reduced sets
+// {1} ∪ {1} = {1} => stop and predict 1, can announce ambiguity {1,2}
+// - (s, 1, x), (s, 2, x), (s', 2, y), (s', 3, y) yields conflicting, reduced sets
+// {1} ∪ {2} = {1,2} => continue
+// - (s, 1, x), (s, 2, x), (s', 3, y), (s', 4, y) yields conflicting, reduced sets
+// {1} ∪ {3} = {1,3} => continue
+//
+// # Exact Ambiguity Detection
+//
+// If all states report the same conflicting set of alternatives, then we
+// know we have the exact ambiguity set:
+//
+// |A_i| > 1
+//
+// and
+//
+// A_i = A_j ; for all i, j
+//
+// In other words, we continue examining lookahead until all A_i
+// have more than one alternative and all A_i are the same. If
+//
+// A={{1,2}, {1,3}}
+//
+// then regular LL prediction would terminate because the resolved set is {1}.
+// To determine what the real ambiguity is, we have to know whether the ambiguity is between one and
+// two or one and three so we keep going. We can only stop prediction when
+// we need exact ambiguity detection when the sets look like:
+//
+// A={{1,2}}
+//
+// or
+//
+// {{1,2},{1,2}}, etc...
+func PredictionModeresolvesToJustOneViableAlt(altsets []*BitSet) int {
+ return PredictionModegetSingleViableAlt(altsets)
+}
+
+// PredictionModeallSubsetsConflict determines if every alternative subset in altsets contains more
+// than one alternative.
+//
+// The func returns true if every [BitSet] in altsets has
+// cardinality > 1
+func PredictionModeallSubsetsConflict(altsets []*BitSet) bool {
+ return !PredictionModehasNonConflictingAltSet(altsets)
+}
+
+// PredictionModehasNonConflictingAltSet determines if any single alternative subset in altsets contains
+// exactly one alternative.
+//
+// The func returns true if altsets contains at least one [BitSet] with
+// cardinality 1
+func PredictionModehasNonConflictingAltSet(altsets []*BitSet) bool {
+ for i := 0; i < len(altsets); i++ {
+ alts := altsets[i]
+ if alts.length() == 1 {
+ return true
+ }
+ }
+ return false
+}
+
+// PredictionModehasConflictingAltSet determines if any single alternative subset in altsets contains
+// more than one alternative.
+//
+// The func returns true if altsets contains a [BitSet] with
+// cardinality > 1, otherwise false
+func PredictionModehasConflictingAltSet(altsets []*BitSet) bool {
+ for i := 0; i < len(altsets); i++ {
+ alts := altsets[i]
+ if alts.length() > 1 {
+ return true
+ }
+ }
+ return false
+}
+
+// PredictionModeallSubsetsEqual determines if every alternative subset in altsets is equivalent.
+//
+// The func returns true if every member of altsets is equal to the others.
+func PredictionModeallSubsetsEqual(altsets []*BitSet) bool {
+ var first *BitSet
+
+ for i := 0; i < len(altsets); i++ {
+ alts := altsets[i]
+ if first == nil {
+ first = alts
+ } else if alts != first {
+ return false
+ }
+ }
+
+ return true
+}
+
+// PredictionModegetUniqueAlt returns the unique alternative predicted by all alternative subsets in
+// altsets. If no such alternative exists, this method returns
+// [ATNInvalidAltNumber].
+//
+// @param altsets a collection of alternative subsets
+func PredictionModegetUniqueAlt(altsets []*BitSet) int {
+ all := PredictionModeGetAlts(altsets)
+ if all.length() == 1 {
+ return all.minValue()
+ }
+
+ return ATNInvalidAltNumber
+}
+
+// PredictionModeGetAlts returns the complete set of represented alternatives for a collection of
+// alternative subsets. This method returns the union of each [BitSet]
+// in altsets, being the set of represented alternatives in altsets.
+func PredictionModeGetAlts(altsets []*BitSet) *BitSet {
+ all := NewBitSet()
+ for _, alts := range altsets {
+ all.or(alts)
+ }
+ return all
+}
+
+// PredictionModegetConflictingAltSubsets gets the conflicting alt subsets from a configuration set.
+//
+// for each configuration c in configs:
+// map[c] U= c.ATNConfig.alt // map hash/equals uses s and x, not alt and not pred
+func PredictionModegetConflictingAltSubsets(configs *ATNConfigSet) []*BitSet {
+ configToAlts := NewJMap[*ATNConfig, *BitSet, *ATNAltConfigComparator[*ATNConfig]](atnAltCfgEqInst, AltSetCollection, "PredictionModegetConflictingAltSubsets()")
+
+ for _, c := range configs.configs {
+
+ alts, ok := configToAlts.Get(c)
+ if !ok {
+ alts = NewBitSet()
+ configToAlts.Put(c, alts)
+ }
+ alts.add(c.GetAlt())
+ }
+
+ return configToAlts.Values()
+}
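+
+// Illustrative example: for the configurations (s, 1, x), (s, 2, x) and
+// (s', 3, y), grouping by (state, context) while ignoring the alt yields
+// the subsets {1, 2} for (s, x) and {3} for (s', y).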
+
+// PredictionModeGetStateToAltMap gets a map from state to alt subset from a configuration set.
+//
+// for each configuration c in configs:
+//	map[c.ATNConfig.state] U= c.ATNConfig.alt
+func PredictionModeGetStateToAltMap(configs *ATNConfigSet) *AltDict {
+ m := NewAltDict()
+
+ for _, c := range configs.configs {
+ alts := m.Get(c.GetState().String())
+ if alts == nil {
+ alts = NewBitSet()
+ m.put(c.GetState().String(), alts)
+ }
+ alts.(*BitSet).add(c.GetAlt())
+ }
+ return m
+}
+
+func PredictionModehasStateAssociatedWithOneAlt(configs *ATNConfigSet) bool {
+ values := PredictionModeGetStateToAltMap(configs).values()
+ for i := 0; i < len(values); i++ {
+ if values[i].(*BitSet).length() == 1 {
+ return true
+ }
+ }
+ return false
+}
+
+// PredictionModegetSingleViableAlt gets the single alternative predicted by all alternative subsets in altsets
+// if there is one.
+//
+// TODO: JI - Review this code - it does not seem to do the same thing as the Java code - maybe because [BitSet] is not like the Java utils BitSet
+func PredictionModegetSingleViableAlt(altsets []*BitSet) int {
+ result := ATNInvalidAltNumber
+
+ for i := 0; i < len(altsets); i++ {
+ alts := altsets[i]
+ minAlt := alts.minValue()
+ if result == ATNInvalidAltNumber {
+ result = minAlt
+ } else if result != minAlt { // more than 1 viable alt
+ return ATNInvalidAltNumber
+ }
+ }
+ return result
+}
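+
+// Package-internal sketch (illustrative): the reduction keeps only the
+// minimum alternative of each subset and succeeds only when all the
+// minima agree:
+//
+//	s1 := NewBitSet() // will hold {1, 2}, min 1
+//	s1.add(1)
+//	s1.add(2)
+//	s2 := NewBitSet() // will hold {2, 3}, min 2
+//	s2.add(2)
+//	s2.add(3)
+//	PredictionModegetSingleViableAlt([]*BitSet{s1})     // == 1
+//	PredictionModegetSingleViableAlt([]*BitSet{s1, s2}) // == ATNInvalidAltNumber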
diff --git a/vendor/github.com/antlr4-go/antlr/v4/recognizer.go b/vendor/github.com/antlr4-go/antlr/v4/recognizer.go
new file mode 100644
index 000000000..2e0b504fb
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/recognizer.go
@@ -0,0 +1,241 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strings"
+
+ "strconv"
+)
+
+type Recognizer interface {
+ GetLiteralNames() []string
+ GetSymbolicNames() []string
+ GetRuleNames() []string
+
+ Sempred(RuleContext, int, int) bool
+ Precpred(RuleContext, int) bool
+
+ GetState() int
+ SetState(int)
+ Action(RuleContext, int, int)
+ AddErrorListener(ErrorListener)
+ RemoveErrorListeners()
+ GetATN() *ATN
+ GetErrorListenerDispatch() ErrorListener
+ HasError() bool
+ GetError() RecognitionException
+ SetError(RecognitionException)
+}
+
+type BaseRecognizer struct {
+ listeners []ErrorListener
+ state int
+
+ RuleNames []string
+ LiteralNames []string
+ SymbolicNames []string
+ GrammarFileName string
+ SynErr RecognitionException
+}
+
+func NewBaseRecognizer() *BaseRecognizer {
+ rec := new(BaseRecognizer)
+ rec.listeners = []ErrorListener{ConsoleErrorListenerINSTANCE}
+ rec.state = -1
+ return rec
+}
+
+//goland:noinspection GoUnusedGlobalVariable
+var tokenTypeMapCache = make(map[string]int)
+
+//goland:noinspection GoUnusedGlobalVariable
+var ruleIndexMapCache = make(map[string]int)
+
+func (b *BaseRecognizer) checkVersion(toolVersion string) {
+ runtimeVersion := "4.12.0"
+ if runtimeVersion != toolVersion {
+ fmt.Println("ANTLR runtime and generated code versions disagree: " + runtimeVersion + "!=" + toolVersion)
+ }
+}
+
+func (b *BaseRecognizer) SetError(err RecognitionException) {
+ b.SynErr = err
+}
+
+func (b *BaseRecognizer) HasError() bool {
+ return b.SynErr != nil
+}
+
+func (b *BaseRecognizer) GetError() RecognitionException {
+ return b.SynErr
+}
+
+func (b *BaseRecognizer) Action(_ RuleContext, _, _ int) {
+ panic("action not implemented on Recognizer!")
+}
+
+func (b *BaseRecognizer) AddErrorListener(listener ErrorListener) {
+ b.listeners = append(b.listeners, listener)
+}
+
+func (b *BaseRecognizer) RemoveErrorListeners() {
+ b.listeners = make([]ErrorListener, 0)
+}
+
+func (b *BaseRecognizer) GetRuleNames() []string {
+ return b.RuleNames
+}
+
+func (b *BaseRecognizer) GetTokenNames() []string {
+ return b.LiteralNames
+}
+
+func (b *BaseRecognizer) GetSymbolicNames() []string {
+ return b.SymbolicNames
+}
+
+func (b *BaseRecognizer) GetLiteralNames() []string {
+ return b.LiteralNames
+}
+
+func (b *BaseRecognizer) GetState() int {
+ return b.state
+}
+
+func (b *BaseRecognizer) SetState(v int) {
+ b.state = v
+}
+
+//func (b *Recognizer) GetTokenTypeMap() {
+// var tokenNames = b.GetTokenNames()
+// if (tokenNames==nil) {
+// panic("The current recognizer does not provide a list of token names.")
+// }
+// var result = tokenTypeMapCache[tokenNames]
+// if(result==nil) {
+// result = tokenNames.reduce(function(o, k, i) { o[k] = i })
+// result.EOF = TokenEOF
+// tokenTypeMapCache[tokenNames] = result
+// }
+// return result
+//}
+
+// GetRuleIndexMap gets a map from rule names to rule indexes.
+//
+// Used for XPath and tree pattern compilation.
+//
+// TODO: JI This is not yet implemented in the Go runtime. Maybe not needed.
+func (b *BaseRecognizer) GetRuleIndexMap() map[string]int {
+
+ panic("Method not defined!")
+ // var ruleNames = b.GetRuleNames()
+ // if (ruleNames==nil) {
+ // panic("The current recognizer does not provide a list of rule names.")
+ // }
+ //
+ // var result = ruleIndexMapCache[ruleNames]
+ // if(result==nil) {
+ // result = ruleNames.reduce(function(o, k, i) { o[k] = i })
+ // ruleIndexMapCache[ruleNames] = result
+ // }
+ // return result
+}
+
+// GetTokenType gets the token type based upon its name
+func (b *BaseRecognizer) GetTokenType(_ string) int {
+ panic("Method not defined!")
+ // var ttype = b.GetTokenTypeMap()[tokenName]
+ // if (ttype !=nil) {
+ // return ttype
+ // } else {
+ // return TokenInvalidType
+ // }
+}
+
+//func (b *Recognizer) GetTokenTypeMap() map[string]int {
+// Vocabulary vocabulary = getVocabulary()
+//
+// Synchronized (tokenTypeMapCache) {
+// Map result = tokenTypeMapCache.Get(vocabulary)
+// if (result == null) {
+// result = new HashMap()
+// for (int i = 0; i < GetATN().maxTokenType; i++) {
+// String literalName = vocabulary.getLiteralName(i)
+// if (literalName != null) {
+// result.put(literalName, i)
+// }
+//
+// String symbolicName = vocabulary.GetSymbolicName(i)
+// if (symbolicName != null) {
+// result.put(symbolicName, i)
+// }
+// }
+//
+// result.put("EOF", Token.EOF)
+// result = Collections.unmodifiableMap(result)
+// tokenTypeMapCache.put(vocabulary, result)
+// }
+//
+// return result
+// }
+//}
+
+// GetErrorHeader returns the error header, normally line/character position information.
+//
+// Can be overridden in sub structs embedding BaseRecognizer.
+func (b *BaseRecognizer) GetErrorHeader(e RecognitionException) string {
+ line := e.GetOffendingToken().GetLine()
+ column := e.GetOffendingToken().GetColumn()
+ return "line " + strconv.Itoa(line) + ":" + strconv.Itoa(column)
+}
+
+// GetTokenErrorDisplay shows how a token should be displayed in an error message.
+//
+// The default is to display just the text, but during development you might
+// want to have a lot of information spit out. Override in that case
+// to use t.String() (which, for CommonToken, dumps everything about
+// the token). This is better than forcing you to override a method in
+// your token objects because you don't have to go modify your lexer
+// so that it creates a new token type.
+//
+// Deprecated: This method is not called by the ANTLR 4 Runtime. Specific
+// implementations of [ANTLRErrorStrategy] may provide a similar
+// feature when necessary. For example, see [DefaultErrorStrategy].GetTokenErrorDisplay()
+func (b *BaseRecognizer) GetTokenErrorDisplay(t Token) string {
+ if t == nil {
+		return "<no token>"
+ }
+ s := t.GetText()
+ if s == "" {
+ if t.GetTokenType() == TokenEOF {
+			s = "<EOF>"
+ } else {
+ s = "<" + strconv.Itoa(t.GetTokenType()) + ">"
+ }
+ }
+ s = strings.Replace(s, "\t", "\\t", -1)
+ s = strings.Replace(s, "\n", "\\n", -1)
+ s = strings.Replace(s, "\r", "\\r", -1)
+
+ return "'" + s + "'"
+}
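+
+// For example (illustrative): a token whose text contains a line break,
+// such as "a\nb", is rendered as 'a\nb' with the escape made visible, so
+// error messages stay on a single line.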
+
+func (b *BaseRecognizer) GetErrorListenerDispatch() ErrorListener {
+ return NewProxyErrorListener(b.listeners)
+}
+
+// Sempred embedding structs need to override this if there are sempreds or actions
+// that the ATN interpreter needs to execute
+func (b *BaseRecognizer) Sempred(_ RuleContext, _ int, _ int) bool {
+ return true
+}
+
+// Precpred embedding structs need to override this if there are preceding predicates
+// that the ATN interpreter needs to execute
+func (b *BaseRecognizer) Precpred(_ RuleContext, _ int) bool {
+ return true
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/rule_context.go b/vendor/github.com/antlr4-go/antlr/v4/rule_context.go
new file mode 100644
index 000000000..f2ad04793
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/rule_context.go
@@ -0,0 +1,40 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+// RuleContext is a record of a single rule invocation. It knows
+// which context invoked it, if any. If there is no parent context, then
+// naturally the invoking state is not valid. The parent link
+// provides a chain upwards from the current rule invocation to the root
+// of the invocation tree, forming a stack.
+//
+// We actually carry no information about the rule associated with this context (except
+// when parsing). We keep only the state number of the invoking state from
+// the [ATN] submachine that invoked this. Contrast this with the s
+// pointer inside [ParserRuleContext] that tracks the current state
+// being "executed" for the current rule.
+//
+// The parent contexts are useful for computing lookahead sets and
+// getting error information.
+//
+// These objects are used during parsing and prediction.
+// For the special case of parsers, we use the struct
+// [ParserRuleContext], which embeds a RuleContext.
+//
+// @see ParserRuleContext
+type RuleContext interface {
+ RuleNode
+
+ GetInvokingState() int
+ SetInvokingState(int)
+
+ GetRuleIndex() int
+ IsEmpty() bool
+
+ GetAltNumber() int
+ SetAltNumber(altNumber int)
+
+ String([]string, RuleContext) string
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/semantic_context.go b/vendor/github.com/antlr4-go/antlr/v4/semantic_context.go
new file mode 100644
index 000000000..68cb9061e
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/semantic_context.go
@@ -0,0 +1,464 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// SemanticContext is a tree structure used to record the semantic context
+// in which an ATN configuration is valid. It's either a single predicate,
+// a conjunction p1 && p2, or a sum of products p1 || p2.
+//
+// I have scoped the AND, OR, and Predicate subclasses of
+// [SemanticContext] within the scope of this outer "class".
+type SemanticContext interface {
+ Equals(other Collectable[SemanticContext]) bool
+ Hash() int
+
+ evaluate(parser Recognizer, outerContext RuleContext) bool
+ evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext
+
+ String() string
+}
+
+func SemanticContextandContext(a, b SemanticContext) SemanticContext {
+ if a == nil || a == SemanticContextNone {
+ return b
+ }
+ if b == nil || b == SemanticContextNone {
+ return a
+ }
+ result := NewAND(a, b)
+ if len(result.opnds) == 1 {
+ return result.opnds[0]
+ }
+
+ return result
+}
+
+func SemanticContextorContext(a, b SemanticContext) SemanticContext {
+ if a == nil {
+ return b
+ }
+ if b == nil {
+ return a
+ }
+ if a == SemanticContextNone || b == SemanticContextNone {
+ return SemanticContextNone
+ }
+ result := NewOR(a, b)
+ if len(result.opnds) == 1 {
+ return result.opnds[0]
+ }
+
+ return result
+}
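+
+// Sketch (illustrative values): combining predicates with the helpers above.
+// SemanticContextNone behaves as the always-true predicate, so it is the
+// identity for AND and absorbing for OR:
+//
+//	p1 := NewPredicate(0, 0, false)
+//	p2 := NewPredicate(1, 0, false)
+//	both := SemanticContextandContext(p1, p2)                  // p1 && p2
+//	anyOf := SemanticContextorContext(p1, SemanticContextNone) // always true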
+
+type Predicate struct {
+ ruleIndex int
+ predIndex int
+ isCtxDependent bool
+}
+
+func NewPredicate(ruleIndex, predIndex int, isCtxDependent bool) *Predicate {
+ p := new(Predicate)
+
+ p.ruleIndex = ruleIndex
+ p.predIndex = predIndex
+ p.isCtxDependent = isCtxDependent // e.g., $i ref in pred
+ return p
+}
+
+// SemanticContextNone is the default [SemanticContext], which is semantically
+// equivalent to a predicate of the form {true}?.
+var SemanticContextNone = NewPredicate(-1, -1, false)
+
+func (p *Predicate) evalPrecedence(_ Recognizer, _ RuleContext) SemanticContext {
+ return p
+}
+
+func (p *Predicate) evaluate(parser Recognizer, outerContext RuleContext) bool {
+
+ var localctx RuleContext
+
+ if p.isCtxDependent {
+ localctx = outerContext
+ }
+
+ return parser.Sempred(localctx, p.ruleIndex, p.predIndex)
+}
+
+func (p *Predicate) Equals(other Collectable[SemanticContext]) bool {
+ if p == other {
+ return true
+ } else if _, ok := other.(*Predicate); !ok {
+ return false
+ } else {
+ return p.ruleIndex == other.(*Predicate).ruleIndex &&
+ p.predIndex == other.(*Predicate).predIndex &&
+ p.isCtxDependent == other.(*Predicate).isCtxDependent
+ }
+}
+
+func (p *Predicate) Hash() int {
+ h := murmurInit(0)
+ h = murmurUpdate(h, p.ruleIndex)
+ h = murmurUpdate(h, p.predIndex)
+ if p.isCtxDependent {
+ h = murmurUpdate(h, 1)
+ } else {
+ h = murmurUpdate(h, 0)
+ }
+ return murmurFinish(h, 3)
+}
+
+func (p *Predicate) String() string {
+ return "{" + strconv.Itoa(p.ruleIndex) + ":" + strconv.Itoa(p.predIndex) + "}?"
+}
+
+type PrecedencePredicate struct {
+ precedence int
+}
+
+func NewPrecedencePredicate(precedence int) *PrecedencePredicate {
+
+ p := new(PrecedencePredicate)
+ p.precedence = precedence
+
+ return p
+}
+
+func (p *PrecedencePredicate) evaluate(parser Recognizer, outerContext RuleContext) bool {
+ return parser.Precpred(outerContext, p.precedence)
+}
+
+func (p *PrecedencePredicate) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext {
+ if parser.Precpred(outerContext, p.precedence) {
+ return SemanticContextNone
+ }
+
+ return nil
+}
+
+func (p *PrecedencePredicate) compareTo(other *PrecedencePredicate) int {
+ return p.precedence - other.precedence
+}
+
+func (p *PrecedencePredicate) Equals(other Collectable[SemanticContext]) bool {
+
+ var op *PrecedencePredicate
+ var ok bool
+ if op, ok = other.(*PrecedencePredicate); !ok {
+ return false
+ }
+
+ if p == op {
+ return true
+ }
+
+ return p.precedence == other.(*PrecedencePredicate).precedence
+}
+
+func (p *PrecedencePredicate) Hash() int {
+ h := uint32(1)
+ h = 31*h + uint32(p.precedence)
+ return int(h)
+}
+
+func (p *PrecedencePredicate) String() string {
+ return "{" + strconv.Itoa(p.precedence) + ">=prec}?"
+}
+
+func PrecedencePredicatefilterPrecedencePredicates(set *JStore[SemanticContext, Comparator[SemanticContext]]) []*PrecedencePredicate {
+ result := make([]*PrecedencePredicate, 0)
+
+ set.Each(func(v SemanticContext) bool {
+ if c2, ok := v.(*PrecedencePredicate); ok {
+ result = append(result, c2)
+ }
+ return true
+ })
+
+ return result
+}
+
+// AND is a semantic context which is true whenever none of the contained
+// contexts is false.
+type AND struct {
+ opnds []SemanticContext
+}
+
+func NewAND(a, b SemanticContext) *AND {
+
+ operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst, SemanticContextCollection, "NewAND() operands")
+ if aa, ok := a.(*AND); ok {
+ for _, o := range aa.opnds {
+ operands.Put(o)
+ }
+ } else {
+ operands.Put(a)
+ }
+
+ if ba, ok := b.(*AND); ok {
+ for _, o := range ba.opnds {
+ operands.Put(o)
+ }
+ } else {
+ operands.Put(b)
+ }
+ precedencePredicates := PrecedencePredicatefilterPrecedencePredicates(operands)
+ if len(precedencePredicates) > 0 {
+ // interested in the transition with the lowest precedence
+ var reduced *PrecedencePredicate
+
+ for _, p := range precedencePredicates {
+ if reduced == nil || p.precedence < reduced.precedence {
+ reduced = p
+ }
+ }
+
+ operands.Put(reduced)
+ }
+
+ vs := operands.Values()
+ opnds := make([]SemanticContext, len(vs))
+ copy(opnds, vs)
+
+ and := new(AND)
+ and.opnds = opnds
+
+ return and
+}
+
+func (a *AND) Equals(other Collectable[SemanticContext]) bool {
+ if a == other {
+ return true
+ }
+ if _, ok := other.(*AND); !ok {
+ return false
+ } else {
+ for i, v := range other.(*AND).opnds {
+ if !a.opnds[i].Equals(v) {
+ return false
+ }
+ }
+ return true
+ }
+}
+
+// evaluate returns true only if all the contained operands evaluate to true.
+//
+// The evaluation of predicates by a context is short-circuiting, but
+// unordered.
+func (a *AND) evaluate(parser Recognizer, outerContext RuleContext) bool {
+ for i := 0; i < len(a.opnds); i++ {
+ if !a.opnds[i].evaluate(parser, outerContext) {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *AND) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext {
+ differs := false
+ operands := make([]SemanticContext, 0)
+
+ for i := 0; i < len(a.opnds); i++ {
+ context := a.opnds[i]
+ evaluated := context.evalPrecedence(parser, outerContext)
+ differs = differs || (evaluated != context)
+ if evaluated == nil {
+ // The AND context is false if any element is false
+ return nil
+ } else if evaluated != SemanticContextNone {
+ // Reduce the result by Skipping true elements
+ operands = append(operands, evaluated)
+ }
+ }
+ if !differs {
+ return a
+ }
+
+ if len(operands) == 0 {
+ // all elements were true, so the AND context is true
+ return SemanticContextNone
+ }
+
+ var result SemanticContext
+
+ for _, o := range operands {
+ if result == nil {
+ result = o
+ } else {
+ result = SemanticContextandContext(result, o)
+ }
+ }
+
+ return result
+}
+
+func (a *AND) Hash() int {
+ h := murmurInit(37) // Init with a value different from OR
+ for _, op := range a.opnds {
+ h = murmurUpdate(h, op.Hash())
+ }
+ return murmurFinish(h, len(a.opnds))
+}
+
+func (o *OR) Hash() int {
+	h := murmurInit(41) // Init with a value different from AND
+ for _, op := range o.opnds {
+ h = murmurUpdate(h, op.Hash())
+ }
+ return murmurFinish(h, len(o.opnds))
+}
+
+func (a *AND) String() string {
+ s := ""
+
+ for _, o := range a.opnds {
+ s += "&& " + fmt.Sprint(o)
+ }
+
+ if len(s) > 3 {
+		return s[3:] // strip the leading "&& "
+ }
+
+ return s
+}
+
+// OR is a semantic context which is true whenever at least one of the
+// contained contexts is true.
+type OR struct {
+ opnds []SemanticContext
+}
+
+func NewOR(a, b SemanticContext) *OR {
+
+ operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst, SemanticContextCollection, "NewOR() operands")
+ if aa, ok := a.(*OR); ok {
+ for _, o := range aa.opnds {
+ operands.Put(o)
+ }
+ } else {
+ operands.Put(a)
+ }
+
+ if ba, ok := b.(*OR); ok {
+ for _, o := range ba.opnds {
+ operands.Put(o)
+ }
+ } else {
+ operands.Put(b)
+ }
+ precedencePredicates := PrecedencePredicatefilterPrecedencePredicates(operands)
+ if len(precedencePredicates) > 0 {
+ // interested in the transition with the lowest precedence
+ var reduced *PrecedencePredicate
+
+ for _, p := range precedencePredicates {
+ if reduced == nil || p.precedence > reduced.precedence {
+ reduced = p
+ }
+ }
+
+ operands.Put(reduced)
+ }
+
+ vs := operands.Values()
+
+ opnds := make([]SemanticContext, len(vs))
+ copy(opnds, vs)
+
+ o := new(OR)
+ o.opnds = opnds
+
+ return o
+}
+
+func (o *OR) Equals(other Collectable[SemanticContext]) bool {
+ if o == other {
+ return true
+ } else if _, ok := other.(*OR); !ok {
+ return false
+ } else {
+ for i, v := range other.(*OR).opnds {
+ if !o.opnds[i].Equals(v) {
+ return false
+ }
+ }
+ return true
+ }
+}
+
+// evaluate returns true if any of the contained operands evaluates to true.
+//
+// The evaluation of predicates by a context is short-circuiting, but
+// unordered.
+func (o *OR) evaluate(parser Recognizer, outerContext RuleContext) bool {
+ for i := 0; i < len(o.opnds); i++ {
+ if o.opnds[i].evaluate(parser, outerContext) {
+ return true
+ }
+ }
+ return false
+}
+
+func (o *OR) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext {
+ differs := false
+ operands := make([]SemanticContext, 0)
+ for i := 0; i < len(o.opnds); i++ {
+ context := o.opnds[i]
+ evaluated := context.evalPrecedence(parser, outerContext)
+ differs = differs || (evaluated != context)
+ if evaluated == SemanticContextNone {
+ // The OR context is true if any element is true
+ return SemanticContextNone
+ } else if evaluated != nil {
+ // Reduce the result by Skipping false elements
+ operands = append(operands, evaluated)
+ }
+ }
+ if !differs {
+ return o
+ }
+ if len(operands) == 0 {
+ // all elements were false, so the OR context is false
+ return nil
+ }
+ var result SemanticContext
+
+ for _, o := range operands {
+ if result == nil {
+ result = o
+ } else {
+ result = SemanticContextorContext(result, o)
+ }
+ }
+
+ return result
+}
+
+func (o *OR) String() string {
+ s := ""
+
+ for _, o := range o.opnds {
+ s += "|| " + fmt.Sprint(o)
+ }
+
+ if len(s) > 3 {
+		return s[3:] // strip the leading "|| "
+ }
+
+ return s
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/statistics.go b/vendor/github.com/antlr4-go/antlr/v4/statistics.go
new file mode 100644
index 000000000..70c0673a0
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/statistics.go
@@ -0,0 +1,281 @@
+//go:build antlr.stats
+
+package antlr
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "path/filepath"
+ "sort"
+ "strconv"
+ "sync"
+)
+
+// This file allows the user to collect statistics about the behavior of the ANTLR Go runtime.
+// It is not enabled by default and so incurs no time penalty. To enable it, you must build the
+// runtime with the antlr.stats build tag.
+//
+
+// Tells various components to collect statistics - because it is only true when this file is included, it will
+// allow the compiler to completely eliminate all the code that is only used when collecting statistics.
+const collectStats = true
+
+// goRunStats is a collection of all the various data the ANTLR runtime has collected about a particular run.
+// It is exported so that it can be used by others to look for things that are not already looked for in the
+// runtime statistics.
+type goRunStats struct {
+
+ // jStats is a slice of all the [JStatRec] records that have been created, which is one for EVERY collection created
+ // during a run. It is exported so that it can be used by others to look for things that are not already looked for
+ // within this package.
+ //
+ jStats []*JStatRec
+ jStatsLock sync.RWMutex
+ topN int
+ topNByMax []*JStatRec
+ topNByUsed []*JStatRec
+ unusedCollections map[CollectionSource]int
+ counts map[CollectionSource]int
+}
+
+const (
+ collectionsFile = "collections"
+)
+
+var (
+ Statistics = &goRunStats{
+ topN: 10,
+ }
+)
+
+type statsOption func(*goRunStats) error
+
+// Configure allows the statistics system to be configured as the user wants and override the defaults
+func (s *goRunStats) Configure(options ...statsOption) error {
+ for _, option := range options {
+ err := option(s)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// WithTopN sets the number of things to list in the report when we are concerned with the top N things.
+//
+// For example, if you want to see the top 20 collections by size, you can do:
+//
+// antlr.Statistics.Configure(antlr.WithTopN(20))
+func WithTopN(topN int) statsOption {
+ return func(s *goRunStats) error {
+ s.topN = topN
+ return nil
+ }
+}
+
+// Analyze looks through all the statistical records and computes all the outputs that might be useful to the user.
+//
+// The function gathers and analyzes a number of statistics about any particular run of
+// an ANTLR generated recognizer. In the vast majority of cases, the statistics are only
+// useful to maintainers of ANTLR itself, but they can be useful to users as well. They may be
+// especially useful in tracking down bugs or performance problems when an ANTLR user could
+// supply the output from this package, but cannot supply the grammar file(s) they are using, even
+// privately to the maintainers.
+//
+// The statistics are gathered by the runtime itself, and are not gathered by the parser or lexer, but the user
+// must call this function themselves to analyze the statistics. This is because none of the infrastructure is
+// extant unless the calling program is built with the antlr.stats tag like so:
+//
+// go build -tags antlr.stats .
+//
+// When a program is built with the antlr.stats tag, the Statistics object is created and available outside
+// the package. The user can then call the [Statistics.Analyze] function to analyze the statistics and then call the
+// [Statistics.Report] function to report the statistics.
+//
+// Please forward any questions about this package to the ANTLR discussion groups on GitHub, or send them to
+// me [Jim Idle] directly at jimi@idle.ws
+//
+// [Jim Idle]: https://github.com/jim-idle
+func (s *goRunStats) Analyze() {
+
+ // Look for anything that looks strange and record it in our local maps etc for the report to present it
+ //
+ s.CollectionAnomalies()
+ s.TopNCollections()
+}
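+
+// Typical usage (sketch, assuming the program was built with
+// -tags antlr.stats and that the ./stats directory exists):
+//
+//	Statistics.Analyze()
+//	if err := Statistics.Report("./stats", "run1"); err != nil {
+//		log.Fatal(err)
+//	}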
+
+// TopNCollections looks through all the statistical records and gathers the top ten collections by size.
+func (s *goRunStats) TopNCollections() {
+
+ // Let's sort the stat records by MaxSize
+ //
+ sort.Slice(s.jStats, func(i, j int) bool {
+ return s.jStats[i].MaxSize > s.jStats[j].MaxSize
+ })
+
+ for i := 0; i < len(s.jStats) && i < s.topN; i++ {
+ s.topNByMax = append(s.topNByMax, s.jStats[i])
+ }
+
+ // Sort by the number of times used
+ //
+ sort.Slice(s.jStats, func(i, j int) bool {
+ return s.jStats[i].Gets+s.jStats[i].Puts > s.jStats[j].Gets+s.jStats[j].Puts
+ })
+ for i := 0; i < len(s.jStats) && i < s.topN; i++ {
+ s.topNByUsed = append(s.topNByUsed, s.jStats[i])
+ }
+}
+
+// Report dumps an AsciiDoc formatted report of all the statistics collected during a run to the given dir output
+// path, which should represent a directory. Generated files will be prefixed with the given prefix and will be
+// given a type name such as `collections` and a .adoc suffix.
+func (s *goRunStats) Report(dir string, prefix string) error {
+
+ isDir, err := isDirectory(dir)
+ switch {
+ case err != nil:
+ return err
+ case !isDir:
+ return fmt.Errorf("output directory `%s` is not a directory", dir)
+ }
+ s.reportCollections(dir, prefix)
+
+ // Clean out any old data in case the user forgets
+ //
+ s.Reset()
+ return nil
+}
+
+func (s *goRunStats) Reset() {
+ s.jStats = nil
+ s.topNByUsed = nil
+ s.topNByMax = nil
+}
+
+func (s *goRunStats) reportCollections(dir, prefix string) {
+ cname := filepath.Join(dir, ".asciidoctor")
+ // If the file doesn't exist, create it, or append to the file
+ f, err := os.OpenFile(cname, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
+ if err != nil {
+ log.Fatal(err)
+ }
+ _, _ = f.WriteString(`// .asciidoctorconfig
+++++
+
+++++`)
+ _ = f.Close()
+
+ fname := filepath.Join(dir, prefix+"_"+"_"+collectionsFile+"_"+".adoc")
+ // If the file doesn't exist, create it, or append to the file
+ f, err = os.OpenFile(fname, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer func(f *os.File) {
+ err := f.Close()
+ if err != nil {
+ log.Fatal(err)
+ }
+ }(f)
+ _, _ = f.WriteString("= Collections for " + prefix + "\n\n")
+
+ _, _ = f.WriteString("== Summary\n")
+
+ if s.unusedCollections != nil {
+ _, _ = f.WriteString("=== Unused Collections\n")
+ _, _ = f.WriteString("Unused collections incur a penalty for allocation that makes them a candidate for either\n")
+ _, _ = f.WriteString(" removal or optimization. If you are using a collection that is not used, you should\n")
+ _, _ = f.WriteString(" consider removing it. If you are using a collection that is used, but not very often,\n")
+ _, _ = f.WriteString(" you should consider using lazy initialization to defer the allocation until it is\n")
+ _, _ = f.WriteString(" actually needed.\n\n")
+
+ _, _ = f.WriteString("\n.Unused collections\n")
+ _, _ = f.WriteString(`[cols="<3,>1"]` + "\n\n")
+ _, _ = f.WriteString("|===\n")
+ _, _ = f.WriteString("| Type | Count\n")
+
+ for k, v := range s.unusedCollections {
+ _, _ = f.WriteString("| " + CollectionDescriptors[k].SybolicName + " | " + strconv.Itoa(v) + "\n")
+ }
+ f.WriteString("|===\n\n")
+ }
+
+ _, _ = f.WriteString("\n.Summary of Collections\n")
+ _, _ = f.WriteString(`[cols="<3,>1"]` + "\n\n")
+ _, _ = f.WriteString("|===\n")
+ _, _ = f.WriteString("| Type | Count\n")
+ for k, v := range s.counts {
+ _, _ = f.WriteString("| " + CollectionDescriptors[k].SybolicName + " | " + strconv.Itoa(v) + "\n")
+ }
+ _, _ = f.WriteString("| Total | " + strconv.Itoa(len(s.jStats)) + "\n")
+ _, _ = f.WriteString("|===\n\n")
+
+ _, _ = f.WriteString("\n.Summary of Top " + strconv.Itoa(s.topN) + " Collections by MaxSize\n")
+ _, _ = f.WriteString(`[cols="<1,<3,>1,>1,>1,>1"]` + "\n\n")
+ _, _ = f.WriteString("|===\n")
+ _, _ = f.WriteString("| Source | Description | MaxSize | EndSize | Puts | Gets\n")
+ for _, c := range s.topNByMax {
+ _, _ = f.WriteString("| " + CollectionDescriptors[c.Source].SybolicName + "\n")
+ _, _ = f.WriteString("| " + c.Description + "\n")
+ _, _ = f.WriteString("| " + strconv.Itoa(c.MaxSize) + "\n")
+ _, _ = f.WriteString("| " + strconv.Itoa(c.CurSize) + "\n")
+ _, _ = f.WriteString("| " + strconv.Itoa(c.Puts) + "\n")
+ _, _ = f.WriteString("| " + strconv.Itoa(c.Gets) + "\n")
+ _, _ = f.WriteString("\n")
+ }
+ _, _ = f.WriteString("|===\n\n")
+
+ _, _ = f.WriteString("\n.Summary of Top " + strconv.Itoa(s.topN) + " Collections by Access\n")
+ _, _ = f.WriteString(`[cols="<1,<3,>1,>1,>1,>1,>1"]` + "\n\n")
+ _, _ = f.WriteString("|===\n")
+ _, _ = f.WriteString("| Source | Description | MaxSize | EndSize | Puts | Gets | P+G\n")
+ for _, c := range s.topNByUsed {
+ _, _ = f.WriteString("| " + CollectionDescriptors[c.Source].SybolicName + "\n")
+ _, _ = f.WriteString("| " + c.Description + "\n")
+ _, _ = f.WriteString("| " + strconv.Itoa(c.MaxSize) + "\n")
+ _, _ = f.WriteString("| " + strconv.Itoa(c.CurSize) + "\n")
+ _, _ = f.WriteString("| " + strconv.Itoa(c.Puts) + "\n")
+ _, _ = f.WriteString("| " + strconv.Itoa(c.Gets) + "\n")
+ _, _ = f.WriteString("| " + strconv.Itoa(c.Gets+c.Puts) + "\n")
+ _, _ = f.WriteString("\n")
+ }
+ _, _ = f.WriteString("|===\n\n")
+}
+
+// AddJStatRec adds a [JStatRec] record to the [goRunStats] collection when build runtimeConfig antlr.stats is enabled.
+func (s *goRunStats) AddJStatRec(rec *JStatRec) {
+ s.jStatsLock.Lock()
+ defer s.jStatsLock.Unlock()
+ s.jStats = append(s.jStats, rec)
+}
+
+// CollectionAnomalies looks through all the statistical records and gathers any anomalies that have been found.
+func (s *goRunStats) CollectionAnomalies() {
+ s.jStatsLock.RLock()
+ defer s.jStatsLock.RUnlock()
+ s.counts = make(map[CollectionSource]int, len(s.jStats))
+ for _, c := range s.jStats {
+
+ // Accumulate raw counts
+ //
+ s.counts[c.Source]++
+
+ // Look for allocated but unused collections and count them
+ if c.MaxSize == 0 && c.Puts == 0 {
+ if s.unusedCollections == nil {
+ s.unusedCollections = make(map[CollectionSource]int)
+ }
+ s.unusedCollections[c.Source]++
+ }
+ if c.MaxSize > 6000 {
+ fmt.Println("Collection ", c.Description, "accumulated a max size of ", c.MaxSize, " - this is probably too large and indicates a poorly formed grammar")
+ }
+ }
+
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/stats_data.go b/vendor/github.com/antlr4-go/antlr/v4/stats_data.go
new file mode 100644
index 000000000..4d9eb94e5
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/stats_data.go
@@ -0,0 +1,23 @@
+package antlr
+
+// A JStatRec is a record of a particular use of a [JStore], [JMap] or [JPCMap] collection. Typically, it will be
+// used to look for unused collections that were allocated anyway, problems with hash bucket clashes, and anomalies
+// such as huge numbers of Gets with no entries found (GetNoEnt). You can refer to the CollectionAnomalies() function
+// for ideas on what can be gleaned from these statistics about collections.
+type JStatRec struct {
+ Source CollectionSource
+ MaxSize int
+ CurSize int
+ Gets int
+ GetHits int
+ GetMisses int
+ GetHashConflicts int
+ GetNoEnt int
+ Puts int
+ PutHits int
+ PutMisses int
+ PutHashConflicts int
+ MaxSlotSize int
+ Description string
+ CreateStack []byte
+}
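Because JStatRec's fields are exported, a consumer can derive simple quality metrics from a record. A minimal sketch with made-up numbers; real records are produced by the runtime under the antlr.stats build tag, not constructed by hand:

```go
package main

import (
	"fmt"

	"github.com/antlr4-go/antlr/v4"
)

func main() {
	// Hypothetical values for illustration only.
	rec := antlr.JStatRec{Gets: 120, GetHits: 90, GetHashConflicts: 4, MaxSize: 32}
	if rec.Gets > 0 {
		// Ratios like these are what the anomaly scan above is hinting at.
		fmt.Printf("hit rate %.2f, conflict rate %.2f, max size %d\n",
			float64(rec.GetHits)/float64(rec.Gets),
			float64(rec.GetHashConflicts)/float64(rec.Gets),
			rec.MaxSize)
	}
}
```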
diff --git a/vendor/github.com/antlr4-go/antlr/v4/token.go b/vendor/github.com/antlr4-go/antlr/v4/token.go
new file mode 100644
index 000000000..9670efb82
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/token.go
@@ -0,0 +1,213 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "strconv"
+ "strings"
+)
+
+type TokenSourceCharStreamPair struct {
+ tokenSource TokenSource
+ charStream CharStream
+}
+
+// A token has properties: text, type, line, character position in the line
+// (so we can ignore tabs), token channel, index, and source from which
+// we obtained this token.
+
+type Token interface {
+ GetSource() *TokenSourceCharStreamPair
+ GetTokenType() int
+ GetChannel() int
+ GetStart() int
+ GetStop() int
+ GetLine() int
+ GetColumn() int
+
+ GetText() string
+ SetText(s string)
+
+ GetTokenIndex() int
+ SetTokenIndex(v int)
+
+ GetTokenSource() TokenSource
+ GetInputStream() CharStream
+
+ String() string
+}
+
+type BaseToken struct {
+ source *TokenSourceCharStreamPair
+ tokenType int // token type of the token
+ channel int // The parser ignores everything not on DEFAULT_CHANNEL
+ start int // optional; return -1 if not implemented.
+ stop int // optional; return -1 if not implemented.
+ tokenIndex int // from 0..n-1 of the token object in the input stream
+ line int // line=1..n of the 1st character
+ column int // beginning of the line at which it occurs, 0..n-1
+ text string // text of the token.
+ readOnly bool
+}
+
+const (
+ TokenInvalidType = 0
+
+ // TokenEpsilon - during lookahead operations, this "token" signifies we hit the rule end [ATN] state
+ // and did not follow it despite needing to.
+ TokenEpsilon = -2
+
+ TokenMinUserTokenType = 1
+
+ TokenEOF = -1
+
+ // TokenDefaultChannel is the default channel upon which tokens are sent to the parser.
+ //
+ // All tokens go to the parser (unless [Skip] is called in the lexer rule)
+ // on a particular "channel". The parser tunes to a particular channel
+ // so that whitespace etc... can go to the parser on a "hidden" channel.
+ TokenDefaultChannel = 0
+
+ // TokenHiddenChannel defines the normal hidden channel - the parser will not see tokens that are not on [TokenDefaultChannel].
+ //
+ // Anything on a channel other than TokenDefaultChannel is not parsed by the parser.
+ TokenHiddenChannel = 1
+)
+
+func (b *BaseToken) GetChannel() int {
+ return b.channel
+}
+
+func (b *BaseToken) GetStart() int {
+ return b.start
+}
+
+func (b *BaseToken) GetStop() int {
+ return b.stop
+}
+
+func (b *BaseToken) GetLine() int {
+ return b.line
+}
+
+func (b *BaseToken) GetColumn() int {
+ return b.column
+}
+
+func (b *BaseToken) GetTokenType() int {
+ return b.tokenType
+}
+
+func (b *BaseToken) GetSource() *TokenSourceCharStreamPair {
+ return b.source
+}
+
+func (b *BaseToken) GetTokenIndex() int {
+ return b.tokenIndex
+}
+
+func (b *BaseToken) SetTokenIndex(v int) {
+ b.tokenIndex = v
+}
+
+func (b *BaseToken) GetTokenSource() TokenSource {
+ return b.source.tokenSource
+}
+
+func (b *BaseToken) GetInputStream() CharStream {
+ return b.source.charStream
+}
+
+type CommonToken struct {
+ BaseToken
+}
+
+func NewCommonToken(source *TokenSourceCharStreamPair, tokenType, channel, start, stop int) *CommonToken {
+
+ t := &CommonToken{
+ BaseToken: BaseToken{
+ source: source,
+ tokenType: tokenType,
+ channel: channel,
+ start: start,
+ stop: stop,
+ tokenIndex: -1,
+ },
+ }
+
+ if t.source.tokenSource != nil {
+ t.line = source.tokenSource.GetLine()
+ t.column = source.tokenSource.GetCharPositionInLine()
+ } else {
+ t.column = -1
+ }
+ return t
+}
+
+// An empty TokenSourceCharStreamPair is used as the default value of
+// source for tokens that do not have a source.
+
+// clone constructs a new CommonToken as a copy of this token, preserving the
+// source pair, token type, channel, start/stop bounds, token index, line,
+// column, and text.
+func (c *CommonToken) clone() *CommonToken {
+ t := NewCommonToken(c.source, c.tokenType, c.channel, c.start, c.stop)
+ t.tokenIndex = c.GetTokenIndex()
+ t.line = c.GetLine()
+ t.column = c.GetColumn()
+ t.text = c.GetText()
+ return t
+}
+
+func (c *CommonToken) GetText() string {
+ if c.text != "" {
+ return c.text
+ }
+ input := c.GetInputStream()
+ if input == nil {
+ return ""
+ }
+ n := input.Size()
+ if c.start < n && c.stop < n {
+ return input.GetTextFromInterval(NewInterval(c.start, c.stop))
+ }
+ return ""
+}
+
+func (c *CommonToken) SetText(text string) {
+ c.text = text
+}
+
+func (c *CommonToken) String() string {
+ txt := c.GetText()
+ if txt != "" {
+ txt = strings.Replace(txt, "\n", "\\n", -1)
+ txt = strings.Replace(txt, "\r", "\\r", -1)
+ txt = strings.Replace(txt, "\t", "\\t", -1)
+ } else {
+ txt = ""
+ }
+
+ var ch string
+ if c.channel > 0 {
+ ch = ",channel=" + strconv.Itoa(c.channel)
+ } else {
+ ch = ""
+ }
+
+ return "[@" + strconv.Itoa(c.tokenIndex) + "," + strconv.Itoa(c.start) + ":" + strconv.Itoa(c.stop) + "='" +
+ txt + "',<" + strconv.Itoa(c.tokenType) + ">" +
+ ch + "," + strconv.Itoa(c.line) + ":" + strconv.Itoa(c.column) + "]"
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/token_source.go b/vendor/github.com/antlr4-go/antlr/v4/token_source.go
new file mode 100644
index 000000000..a3f36eaa6
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/token_source.go
@@ -0,0 +1,17 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+type TokenSource interface {
+ NextToken() Token
+ Skip()
+ More()
+ GetLine() int
+ GetCharPositionInLine() int
+ GetInputStream() CharStream
+ GetSourceName() string
+ setTokenFactory(factory TokenFactory)
+ GetTokenFactory() TokenFactory
+}
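Any generated lexer satisfies TokenSource, so NextToken can drive a simple scan over the Token interface above. A hedged sketch; `parser.NewMyLexer` and its import path are hypothetical generated artifacts, not part of the runtime:

```go
package main

import (
	"fmt"

	"github.com/antlr4-go/antlr/v4"

	"example.com/parser" // hypothetical generated package
)

func main() {
	input := antlr.NewInputStream("a + b")
	lexer := parser.NewMyLexer(input)
	// Pull tokens until EOF and print their position, text, type, and channel.
	for tok := lexer.NextToken(); tok.GetTokenType() != antlr.TokenEOF; tok = lexer.NextToken() {
		fmt.Printf("%d:%d %q (type %d, channel %d)\n",
			tok.GetLine(), tok.GetColumn(), tok.GetText(),
			tok.GetTokenType(), tok.GetChannel())
	}
}
```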
diff --git a/vendor/github.com/antlr4-go/antlr/v4/token_stream.go b/vendor/github.com/antlr4-go/antlr/v4/token_stream.go
new file mode 100644
index 000000000..bf4ff6633
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/token_stream.go
@@ -0,0 +1,21 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+type TokenStream interface {
+ IntStream
+
+ LT(k int) Token
+ Reset()
+
+ Get(index int) Token
+ GetTokenSource() TokenSource
+ SetTokenSource(TokenSource)
+
+ GetAllText() string
+ GetTextFromInterval(Interval) string
+ GetTextFromRuleContext(RuleContext) string
+ GetTextFromTokens(Token, Token) string
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/tokenstream_rewriter.go b/vendor/github.com/antlr4-go/antlr/v4/tokenstream_rewriter.go
new file mode 100644
index 000000000..ccf59b465
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/tokenstream_rewriter.go
@@ -0,0 +1,662 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "bytes"
+ "fmt"
+)
+
+//
+// Useful for rewriting out a buffered input token stream after doing some
+// augmentation or other manipulations on it.
+
+//
+// You can insert stuff, replace, and delete chunks. Note that the operations
+// are done lazily--only if you convert the buffer to a {@link String} with
+// {@link TokenStream#getText()}. This is very efficient because you are not
+// moving data around all the time. As the buffer of tokens is converted to
+// strings, the {@link #getText()} method(s) scan the input token stream and
+// check to see if there is an operation at the current index. If so, the
+// operation is done and then normal {@link String} rendering continues on the
+// buffer. This is like having multiple Turing machine instruction streams
+// (programs) operating on a single input tape. :)
+//
+
+// This rewriter makes no modifications to the token stream. It does not ask the
+// stream to fill itself up nor does it advance the input cursor. The token
+// stream {@link TokenStream#index()} will return the same value before and
+// after any {@link #getText()} call.
+
+//
+// The rewriter only works on tokens that you have in the buffer and ignores the
+// current input cursor. If you are buffering tokens on-demand, calling
+// {@link #getText()} halfway through the input will only do rewrites for those
+// tokens in the first half of the file.
+
+//
+// Since the operations are done lazily at {@link #getText}-time, operations do
+// not screw up the token index values. That is, an insert operation at token
+// index {@code i} does not change the index values for tokens
+// {@code i}+1..n-1.
+
+//
+// Because operations never actually alter the buffer, you may always get the
+// original token stream back without undoing anything. Since the instructions
+// are queued up, you can easily simulate transactions and roll back any changes
+// if there is an error just by removing instructions. For example,
+
+//
+// CharStream input = new ANTLRFileStream("input");
+// TLexer lex = new TLexer(input);
+// CommonTokenStream tokens = new CommonTokenStream(lex);
+// T parser = new T(tokens);
+// TokenStreamRewriter rewriter = new TokenStreamRewriter(tokens);
+// parser.startRule();
+//
+
+//
+// Then in the rules, you can execute (assuming rewriter is visible):
+
+//
+// Token t,u;
+// ...
+// rewriter.insertAfter(t, "text to put after t");}
+// rewriter.insertAfter(u, "text after u");}
+// System.out.println(rewriter.getText());
+//
+
+//
+// You can also have multiple "instruction streams" and get multiple rewrites
+// from a single pass over the input. Just name the instruction streams and use
+// that name again when printing the buffer. This could be useful for generating
+// a C file and also its header file--all from the same buffer:
+
+//
+// rewriter.insertAfter("pass1", t, "text to put after t");}
+// rewriter.insertAfter("pass2", u, "text after u");}
+// System.out.println(rewriter.getText("pass1"));
+// System.out.println(rewriter.getText("pass2"));
+//
+
+//
+// If you don't use named rewrite streams, a "default" stream is used as the
+// first example shows.
+
+const (
+ DefaultProgramName = "default"
+ ProgramInitSize = 100
+ MinTokenIndex = 0
+)
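The Java snippets in the comment above translate directly to this runtime's API. A sketch under stated assumptions: `MyLexer`, `MyParser`, and `StartRule` are hypothetical generated types, and the default rewrite program is used throughout:

```go
package main

import (
	"fmt"

	"github.com/antlr4-go/antlr/v4"

	"example.com/parser" // hypothetical generated package
)

func main() {
	input := antlr.NewInputStream("input")
	lexer := parser.NewMyLexer(input)
	tokens := antlr.NewCommonTokenStream(lexer, antlr.TokenDefaultChannel)
	p := parser.NewMyParser(tokens)
	rewriter := antlr.NewTokenStreamRewriter(tokens)
	p.StartRule() // hypothetical entry rule

	// Queue edits lazily; the buffered token stream itself is never modified.
	t := tokens.Get(0) // e.g. a token of interest found during the parse
	rewriter.InsertAfterToken(antlr.DefaultProgramName, t, " /* after t */")
	rewriter.InsertBeforeDefault(t.GetTokenIndex(), "/* before t */ ")

	// Rendering happens only here, by replaying the queued instructions.
	fmt.Println(rewriter.GetTextDefault())
}
```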
+
+// Define the rewrite operation hierarchy
+
+type RewriteOperation interface {
+
+ // Execute the rewrite operation by possibly adding to the buffer.
+ // Return the index of the next token to operate on.
+ Execute(buffer *bytes.Buffer) int
+ String() string
+ GetInstructionIndex() int
+ GetIndex() int
+ GetText() string
+ GetOpName() string
+ GetTokens() TokenStream
+ SetInstructionIndex(val int)
+ SetIndex(int)
+ SetText(string)
+ SetOpName(string)
+ SetTokens(TokenStream)
+}
+
+type BaseRewriteOperation struct {
+ //Current index of rewrites list
+ instructionIndex int
+ //Token buffer index
+ index int
+ //Substitution text
+ text string
+ //Actual operation name
+ opName string
+ //Pointer to token steam
+ tokens TokenStream
+}
+
+func (op *BaseRewriteOperation) GetInstructionIndex() int {
+ return op.instructionIndex
+}
+
+func (op *BaseRewriteOperation) GetIndex() int {
+ return op.index
+}
+
+func (op *BaseRewriteOperation) GetText() string {
+ return op.text
+}
+
+func (op *BaseRewriteOperation) GetOpName() string {
+ return op.opName
+}
+
+func (op *BaseRewriteOperation) GetTokens() TokenStream {
+ return op.tokens
+}
+
+func (op *BaseRewriteOperation) SetInstructionIndex(val int) {
+ op.instructionIndex = val
+}
+
+func (op *BaseRewriteOperation) SetIndex(val int) {
+ op.index = val
+}
+
+func (op *BaseRewriteOperation) SetText(val string) {
+ op.text = val
+}
+
+func (op *BaseRewriteOperation) SetOpName(val string) {
+ op.opName = val
+}
+
+func (op *BaseRewriteOperation) SetTokens(val TokenStream) {
+ op.tokens = val
+}
+
+func (op *BaseRewriteOperation) Execute(_ *bytes.Buffer) int {
+ return op.index
+}
+
+func (op *BaseRewriteOperation) String() string {
+ return fmt.Sprintf("<%s@%d:\"%s\">",
+ op.opName,
+ op.tokens.Get(op.GetIndex()),
+ op.text,
+ )
+
+}
+
+type InsertBeforeOp struct {
+ BaseRewriteOperation
+}
+
+func NewInsertBeforeOp(index int, text string, stream TokenStream) *InsertBeforeOp {
+ return &InsertBeforeOp{BaseRewriteOperation: BaseRewriteOperation{
+ index: index,
+ text: text,
+ opName: "InsertBeforeOp",
+ tokens: stream,
+ }}
+}
+
+func (op *InsertBeforeOp) Execute(buffer *bytes.Buffer) int {
+ buffer.WriteString(op.text)
+ if op.tokens.Get(op.index).GetTokenType() != TokenEOF {
+ buffer.WriteString(op.tokens.Get(op.index).GetText())
+ }
+ return op.index + 1
+}
+
+func (op *InsertBeforeOp) String() string {
+ return op.BaseRewriteOperation.String()
+}
+
+// InsertAfterOp distinguishes insert-after from insert-before so that the "insert after"
+// instructions run first, followed by the "insert before" instructions at the same index.
+// "Insert after" is implemented as "insert before index+1".
+type InsertAfterOp struct {
+ BaseRewriteOperation
+}
+
+func NewInsertAfterOp(index int, text string, stream TokenStream) *InsertAfterOp {
+ return &InsertAfterOp{
+ BaseRewriteOperation: BaseRewriteOperation{
+ index: index + 1,
+ text: text,
+ tokens: stream,
+ },
+ }
+}
+
+func (op *InsertAfterOp) Execute(buffer *bytes.Buffer) int {
+ buffer.WriteString(op.text)
+ if op.tokens.Get(op.index).GetTokenType() != TokenEOF {
+ buffer.WriteString(op.tokens.Get(op.index).GetText())
+ }
+ return op.index + 1
+}
+
+func (op *InsertAfterOp) String() string {
+ return op.BaseRewriteOperation.String()
+}
+
+// ReplaceOp tries to replace range from x..y with (y-x)+1 ReplaceOp
+// instructions.
+type ReplaceOp struct {
+ BaseRewriteOperation
+ LastIndex int
+}
+
+func NewReplaceOp(from, to int, text string, stream TokenStream) *ReplaceOp {
+ return &ReplaceOp{
+ BaseRewriteOperation: BaseRewriteOperation{
+ index: from,
+ text: text,
+ opName: "ReplaceOp",
+ tokens: stream,
+ },
+ LastIndex: to,
+ }
+}
+
+func (op *ReplaceOp) Execute(buffer *bytes.Buffer) int {
+ if op.text != "" {
+ buffer.WriteString(op.text)
+ }
+ return op.LastIndex + 1
+}
+
+func (op *ReplaceOp) String() string {
+ if op.text == "" {
+ return fmt.Sprintf("<DeleteOp@%v..%v>",
+ op.tokens.Get(op.index), op.tokens.Get(op.LastIndex))
+ }
+ return fmt.Sprintf("<ReplaceOp@%v..%v:\"%v\">",
+ op.tokens.Get(op.index), op.tokens.Get(op.LastIndex), op.text)
+}
+
+type TokenStreamRewriter struct {
+ //Our source stream
+ tokens TokenStream
+ // You may have multiple, named streams of rewrite operations.
+ // I'm calling these things "programs."
+ // Maps String (name) → rewrite (List)
+ programs map[string][]RewriteOperation
+ lastRewriteTokenIndexes map[string]int
+}
+
+func NewTokenStreamRewriter(tokens TokenStream) *TokenStreamRewriter {
+ return &TokenStreamRewriter{
+ tokens: tokens,
+ programs: map[string][]RewriteOperation{
+ DefaultProgramName: make([]RewriteOperation, 0, ProgramInitSize),
+ },
+ lastRewriteTokenIndexes: map[string]int{},
+ }
+}
+
+func (tsr *TokenStreamRewriter) GetTokenStream() TokenStream {
+ return tsr.tokens
+}
+
+// Rollback the instruction stream for a program so that
+// the indicated instruction (via instructionIndex) is no
+// longer in the stream. UNTESTED!
+func (tsr *TokenStreamRewriter) Rollback(programName string, instructionIndex int) {
+ is, ok := tsr.programs[programName]
+ if ok {
+ tsr.programs[programName] = is[MinTokenIndex:instructionIndex]
+ }
+}
+
+func (tsr *TokenStreamRewriter) RollbackDefault(instructionIndex int) {
+ tsr.Rollback(DefaultProgramName, instructionIndex)
+}
+
+// DeleteProgram resets the program so that no instructions exist
+func (tsr *TokenStreamRewriter) DeleteProgram(programName string) {
+ tsr.Rollback(programName, MinTokenIndex) // TODO: double-check this, because the lower bound is not included
+}
+
+func (tsr *TokenStreamRewriter) DeleteProgramDefault() {
+ tsr.DeleteProgram(DefaultProgramName)
+}
+
+func (tsr *TokenStreamRewriter) InsertAfter(programName string, index int, text string) {
+ // to insert after, just insert before next index (even if past end)
+ var op RewriteOperation = NewInsertAfterOp(index, text, tsr.tokens)
+ rewrites := tsr.GetProgram(programName)
+ op.SetInstructionIndex(len(rewrites))
+ tsr.AddToProgram(programName, op)
+}
+
+func (tsr *TokenStreamRewriter) InsertAfterDefault(index int, text string) {
+ tsr.InsertAfter(DefaultProgramName, index, text)
+}
+
+func (tsr *TokenStreamRewriter) InsertAfterToken(programName string, token Token, text string) {
+ tsr.InsertAfter(programName, token.GetTokenIndex(), text)
+}
+
+func (tsr *TokenStreamRewriter) InsertBefore(programName string, index int, text string) {
+ var op RewriteOperation = NewInsertBeforeOp(index, text, tsr.tokens)
+ rewrites := tsr.GetProgram(programName)
+ op.SetInstructionIndex(len(rewrites))
+ tsr.AddToProgram(programName, op)
+}
+
+func (tsr *TokenStreamRewriter) InsertBeforeDefault(index int, text string) {
+ tsr.InsertBefore(DefaultProgramName, index, text)
+}
+
+func (tsr *TokenStreamRewriter) InsertBeforeToken(programName string, token Token, text string) {
+ tsr.InsertBefore(programName, token.GetTokenIndex(), text)
+}
+
+func (tsr *TokenStreamRewriter) Replace(programName string, from, to int, text string) {
+ if from > to || from < 0 || to < 0 || to >= tsr.tokens.Size() {
+ panic(fmt.Sprintf("replace: range invalid: %d..%d(size=%d)",
+ from, to, tsr.tokens.Size()))
+ }
+ var op RewriteOperation = NewReplaceOp(from, to, text, tsr.tokens)
+ rewrites := tsr.GetProgram(programName)
+ op.SetInstructionIndex(len(rewrites))
+ tsr.AddToProgram(programName, op)
+}
+
+func (tsr *TokenStreamRewriter) ReplaceDefault(from, to int, text string) {
+ tsr.Replace(DefaultProgramName, from, to, text)
+}
+
+func (tsr *TokenStreamRewriter) ReplaceDefaultPos(index int, text string) {
+ tsr.ReplaceDefault(index, index, text)
+}
+
+func (tsr *TokenStreamRewriter) ReplaceToken(programName string, from, to Token, text string) {
+ tsr.Replace(programName, from.GetTokenIndex(), to.GetTokenIndex(), text)
+}
+
+func (tsr *TokenStreamRewriter) ReplaceTokenDefault(from, to Token, text string) {
+ tsr.ReplaceToken(DefaultProgramName, from, to, text)
+}
+
+func (tsr *TokenStreamRewriter) ReplaceTokenDefaultPos(index Token, text string) {
+ tsr.ReplaceTokenDefault(index, index, text)
+}
+
+func (tsr *TokenStreamRewriter) Delete(programName string, from, to int) {
+ tsr.Replace(programName, from, to, "")
+}
+
+func (tsr *TokenStreamRewriter) DeleteDefault(from, to int) {
+ tsr.Delete(DefaultProgramName, from, to)
+}
+
+func (tsr *TokenStreamRewriter) DeleteDefaultPos(index int) {
+ tsr.DeleteDefault(index, index)
+}
+
+func (tsr *TokenStreamRewriter) DeleteToken(programName string, from, to Token) {
+ tsr.ReplaceToken(programName, from, to, "")
+}
+
+func (tsr *TokenStreamRewriter) DeleteTokenDefault(from, to Token) {
+ tsr.DeleteToken(DefaultProgramName, from, to)
+}
+
+func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndex(programName string) int {
+ i, ok := tsr.lastRewriteTokenIndexes[programName]
+ if !ok {
+ return -1
+ }
+ return i
+}
+
+func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndexDefault() int {
+ return tsr.GetLastRewriteTokenIndex(DefaultProgramName)
+}
+
+func (tsr *TokenStreamRewriter) SetLastRewriteTokenIndex(programName string, i int) {
+ tsr.lastRewriteTokenIndexes[programName] = i
+}
+
+func (tsr *TokenStreamRewriter) InitializeProgram(name string) []RewriteOperation {
+ is := make([]RewriteOperation, 0, ProgramInitSize)
+ tsr.programs[name] = is
+ return is
+}
+
+func (tsr *TokenStreamRewriter) AddToProgram(name string, op RewriteOperation) {
+ is := tsr.GetProgram(name)
+ is = append(is, op)
+ tsr.programs[name] = is
+}
+
+func (tsr *TokenStreamRewriter) GetProgram(name string) []RewriteOperation {
+ is, ok := tsr.programs[name]
+ if !ok {
+ is = tsr.InitializeProgram(name)
+ }
+ return is
+}
+
+// GetTextDefault returns the text from the original tokens altered per the
+// instructions given to this rewriter.
+func (tsr *TokenStreamRewriter) GetTextDefault() string {
+ return tsr.GetText(
+ DefaultProgramName,
+ NewInterval(0, tsr.tokens.Size()-1))
+}
+
+// GetText returns the text from the original tokens altered per the
+// instructions given to this rewriter.
+func (tsr *TokenStreamRewriter) GetText(programName string, interval Interval) string {
+ rewrites := tsr.programs[programName]
+ start := interval.Start
+ stop := interval.Stop
+ // ensure start/end are in range
+ stop = min(stop, tsr.tokens.Size()-1)
+ start = max(start, 0)
+ if len(rewrites) == 0 {
+ return tsr.tokens.GetTextFromInterval(interval) // no instructions to execute
+ }
+ buf := bytes.Buffer{}
+ // First, optimize instruction stream
+ indexToOp := reduceToSingleOperationPerIndex(rewrites)
+ // Walk buffer, executing instructions and emitting tokens
+ for i := start; i <= stop && i < tsr.tokens.Size(); {
+ op := indexToOp[i]
+ delete(indexToOp, i) // remove so any left have index size-1
+ t := tsr.tokens.Get(i)
+ if op == nil {
+ // no operation at that index, just dump token
+ if t.GetTokenType() != TokenEOF {
+ buf.WriteString(t.GetText())
+ }
+ i++ // move to next token
+ } else {
+ i = op.Execute(&buf) // execute operation and skip
+ }
+ }
+ // include stuff after end if it's last index in buffer
+ // So, if they did an insertAfter(lastValidIndex, "foo"), include
+ // foo if end==lastValidIndex.
+ if stop == tsr.tokens.Size()-1 {
+ // Scan any remaining operations after last token
+ // should be included (they will be inserts).
+ for _, op := range indexToOp {
+ if op.GetIndex() >= tsr.tokens.Size()-1 {
+ buf.WriteString(op.GetText())
+ }
+ }
+ }
+ return buf.String()
+}
+
+// reduceToSingleOperationPerIndex combines operations and reports invalid operations (like
+// overlapping replaces that are not completely nested). Inserts to the
+// same index need to be combined, etc.
+//
+// Here are the cases:
+//
+// I.i.u I.j.v leave alone, non-overlapping
+// I.i.u I.i.v combine: Iivu
+//
+// R.i-j.u R.x-y.v | i-j in x-y delete first R
+// R.i-j.u R.i-j.v delete first R
+// R.i-j.u R.x-y.v | x-y in i-j ERROR
+// R.i-j.u R.x-y.v | boundaries overlap ERROR
+//
+// Delete special case of replace (text==null):
+// D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right)
+//
+// I.i.u R.x-y.v | i in (x+1)-y delete I (since insert before
+// we're not deleting i)
+// I.i.u R.x-y.v | i not in (x+1)-y leave alone, non-overlapping
+// R.x-y.v I.i.u | i in x-y ERROR
+// R.x-y.v I.x.u R.x-y.uv (combine, delete I)
+// R.x-y.v I.i.u | i not in x-y leave alone, non-overlapping
+//
+// I.i.u = insert u before op @ index i
+// R.x-y.u = replace x-y indexed tokens with u
+//
+// First we need to examine replaces. For any replace op:
+//
+// 1. wipe out any insertions before op within that range.
+// 2. Drop any replace op before that is contained completely within
+// that range.
+// 3. Throw exception upon boundary overlap with any previous replace.
+//
+// Then we can deal with inserts:
+//
+// 1. for any inserts to same index, combine even if not adjacent.
+// 2. for any prior replace with same left boundary, combine this
+// insert with replace and delete this 'replace'.
+// 3. throw exception if index in same range as previous replace
+//
+// Don't actually delete; make op null in list. Easier to walk list.
+// Later we can throw as we add to index → op map.
+//
+// Note that I.2 R.2-2 will wipe out I.2 even though, technically, the
+// inserted stuff would be before the 'replace' range. But, if you
+// add tokens in front of a method body '{' and then delete the method
+// body, I think the stuff before the '{' you added should disappear too.
+//
+// The func returns a map from token index to operation.
+func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]RewriteOperation {
+ // WALK REPLACES
+ for i := 0; i < len(rewrites); i++ {
+ op := rewrites[i]
+ if op == nil {
+ continue
+ }
+ rop, ok := op.(*ReplaceOp)
+ if !ok {
+ continue
+ }
+ // Wipe prior inserts within range
+ for j := 0; j < i && j < len(rewrites); j++ {
+ if iop, ok := rewrites[j].(*InsertBeforeOp); ok {
+ if iop.index == rop.index {
+ // E.g., insert before 2, delete 2..2; update replace
+ // text to include insert before, kill insert
+ rewrites[iop.instructionIndex] = nil
+ if rop.text != "" {
+ rop.text = iop.text + rop.text
+ } else {
+ rop.text = iop.text
+ }
+ } else if iop.index > rop.index && iop.index <= rop.LastIndex {
+ // delete insert as it's a no-op.
+ rewrites[iop.instructionIndex] = nil
+ }
+ }
+ }
+ // Drop any prior replaces contained within
+ for j := 0; j < i && j < len(rewrites); j++ {
+ if prevop, ok := rewrites[j].(*ReplaceOp); ok {
+ if prevop.index >= rop.index && prevop.LastIndex <= rop.LastIndex {
+ // delete replace as it's a no-op.
+ rewrites[prevop.instructionIndex] = nil
+ continue
+ }
+ // throw exception unless disjoint or identical
+ disjoint := prevop.LastIndex < rop.index || prevop.index > rop.LastIndex
+ // Delete special case of replace (text==null):
+ // D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right)
+ if prevop.text == "" && rop.text == "" && !disjoint {
+ rewrites[prevop.instructionIndex] = nil
+ rop.index = min(prevop.index, rop.index)
+ rop.LastIndex = max(prevop.LastIndex, rop.LastIndex)
+ } else if !disjoint {
+ panic("replace op boundaries of " + rop.String() + " overlap with previous " + prevop.String())
+ }
+ }
+ }
+ }
+ // WALK INSERTS
+ for i := 0; i < len(rewrites); i++ {
+ op := rewrites[i]
+ if op == nil {
+ continue
+ }
+ //hack to replicate inheritance in composition
+ _, iok := rewrites[i].(*InsertBeforeOp)
+ _, aok := rewrites[i].(*InsertAfterOp)
+ if !iok && !aok {
+ continue
+ }
+ iop := rewrites[i]
+ // combine current insert with prior if any at same index
+ // deviating a bit from TokenStreamRewriter.java - hard to incorporate inheritance logic
+ for j := 0; j < i && j < len(rewrites); j++ {
+ if nextIop, ok := rewrites[j].(*InsertAfterOp); ok {
+ if nextIop.index == iop.GetIndex() {
+ iop.SetText(nextIop.text + iop.GetText())
+ rewrites[j] = nil
+ }
+ }
+ if prevIop, ok := rewrites[j].(*InsertBeforeOp); ok {
+ if prevIop.index == iop.GetIndex() {
+ iop.SetText(iop.GetText() + prevIop.text)
+ rewrites[prevIop.instructionIndex] = nil
+ }
+ }
+ }
+ // look for replaces where iop.index is in range; error
+ for j := 0; j < i && j < len(rewrites); j++ {
+ if rop, ok := rewrites[j].(*ReplaceOp); ok {
+ if iop.GetIndex() == rop.index {
+ rop.text = iop.GetText() + rop.text
+ rewrites[i] = nil
+ continue
+ }
+ if iop.GetIndex() >= rop.index && iop.GetIndex() <= rop.LastIndex {
+ panic("insert op " + iop.String() + " within boundaries of previous " + rop.String())
+ }
+ }
+ }
+ }
+ m := map[int]RewriteOperation{}
+ for i := 0; i < len(rewrites); i++ {
+ op := rewrites[i]
+ if op == nil {
+ continue
+ }
+ if _, ok := m[op.GetIndex()]; ok {
+ panic("should only be one op per index")
+ }
+ m[op.GetIndex()] = op
+ }
+ return m
+}
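Two of the reduction rules above can be observed through the public API. A hedged sketch: `tokens` is assumed to be a fully fetched token stream with at least seven tokens, produced by some hypothetical generated lexer:

```go
package rewritedemo

import "github.com/antlr4-go/antlr/v4"

// demoReduceRules exercises the delete-combining and insert-folding rules
// described in the comment above reduceToSingleOperationPerIndex.
func demoReduceRules(tokens antlr.TokenStream) (string, string) {
	// Overlapping deletes (text == "") are combined into a single delete 2..5.
	rw := antlr.NewTokenStreamRewriter(tokens)
	rw.DeleteDefault(2, 3)
	rw.DeleteDefault(3, 5)
	deleted := rw.GetTextDefault() // tokens 2..5 are omitted from the output

	// An insert at the left edge of a later replace is folded into the replace:
	// tokens 4..6 render as "AB" and the insert op is dropped.
	rw2 := antlr.NewTokenStreamRewriter(tokens)
	rw2.InsertBeforeDefault(4, "A")
	rw2.ReplaceDefault(4, 6, "B")
	replaced := rw2.GetTextDefault()

	return deleted, replaced
}
```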
+
+/*
+ Quick fixing Go lack of overloads
+*/
+
+func max(a, b int) int {
+ if a > b {
+ return a
+ } else {
+ return b
+ }
+}
+func min(a, b int) int {
+ if a < b {
+ return a
+ } else {
+ return b
+ }
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/trace_listener.go b/vendor/github.com/antlr4-go/antlr/v4/trace_listener.go
new file mode 100644
index 000000000..7b663bf84
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/trace_listener.go
@@ -0,0 +1,32 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import "fmt"
+
+type TraceListener struct {
+ parser *BaseParser
+}
+
+func NewTraceListener(parser *BaseParser) *TraceListener {
+ tl := new(TraceListener)
+ tl.parser = parser
+ return tl
+}
+
+func (t *TraceListener) VisitErrorNode(_ ErrorNode) {
+}
+
+func (t *TraceListener) EnterEveryRule(ctx ParserRuleContext) {
+ fmt.Println("enter " + t.parser.GetRuleNames()[ctx.GetRuleIndex()] + ", LT(1)=" + t.parser.input.LT(1).GetText())
+}
+
+func (t *TraceListener) VisitTerminal(node TerminalNode) {
+ fmt.Println("consume " + fmt.Sprint(node.GetSymbol()) + " rule " + t.parser.GetRuleNames()[t.parser.ctx.GetRuleIndex()])
+}
+
+func (t *TraceListener) ExitEveryRule(ctx ParserRuleContext) {
+ fmt.Println("exit " + t.parser.GetRuleNames()[ctx.GetRuleIndex()] + ", LT(1)=" + t.parser.input.LT(1).GetText())
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/transition.go b/vendor/github.com/antlr4-go/antlr/v4/transition.go
new file mode 100644
index 000000000..313b0fc12
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/transition.go
@@ -0,0 +1,439 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// An ATN transition between any two ATN states: atom, set, epsilon, action,
+// predicate, and rule transitions.
+//
+// This is a one-way link. It emanates from a state (usually via a list of
+// transitions) and has a target state.
+//
+// Since we never have to change the ATN transitions once we construct them,
+// we can fix these transitions as specific classes. The DFA transitions, on the
+// other hand, need to update the labels as they add transitions to the states,
+// which is why we use the term Edge for the DFA to distinguish them from ATN transitions.
+
+type Transition interface {
+ getTarget() ATNState
+ setTarget(ATNState)
+ getIsEpsilon() bool
+ getLabel() *IntervalSet
+ getSerializationType() int
+ Matches(int, int, int) bool
+}
+
+type BaseTransition struct {
+ target ATNState
+ isEpsilon bool
+ label int
+ intervalSet *IntervalSet
+ serializationType int
+}
+
+func NewBaseTransition(target ATNState) *BaseTransition {
+
+ if target == nil {
+ panic("target cannot be nil.")
+ }
+
+ t := new(BaseTransition)
+
+ t.target = target
+ // Are we epsilon, action, sempred?
+ t.isEpsilon = false
+ t.intervalSet = nil
+
+ return t
+}
+
+func (t *BaseTransition) getTarget() ATNState {
+ return t.target
+}
+
+func (t *BaseTransition) setTarget(s ATNState) {
+ t.target = s
+}
+
+func (t *BaseTransition) getIsEpsilon() bool {
+ return t.isEpsilon
+}
+
+func (t *BaseTransition) getLabel() *IntervalSet {
+ return t.intervalSet
+}
+
+func (t *BaseTransition) getSerializationType() int {
+ return t.serializationType
+}
+
+func (t *BaseTransition) Matches(_, _, _ int) bool {
+ panic("Not implemented")
+}
+
+const (
+ TransitionEPSILON = 1
+ TransitionRANGE = 2
+ TransitionRULE = 3
+ TransitionPREDICATE = 4 // e.g., {isType(input.LT(1))}?
+ TransitionATOM = 5
+ TransitionACTION = 6
+ TransitionSET = 7 // ~(A|B) or ~atom, wildcard, which convert to next 2
+ TransitionNOTSET = 8
+ TransitionWILDCARD = 9
+ TransitionPRECEDENCE = 10
+)
+
+//goland:noinspection GoUnusedGlobalVariable
+var TransitionserializationNames = []string{
+ "INVALID",
+ "EPSILON",
+ "RANGE",
+ "RULE",
+ "PREDICATE",
+ "ATOM",
+ "ACTION",
+ "SET",
+ "NOT_SET",
+ "WILDCARD",
+ "PRECEDENCE",
+}
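The serialization-type constants above index this names table, so a transition's type can be printed symbolically. A minimal sketch:

```go
package main

import (
	"fmt"

	"github.com/antlr4-go/antlr/v4"
)

func main() {
	// Prints "RANGE": TransitionRANGE (2) indexes the names table.
	fmt.Println(antlr.TransitionserializationNames[antlr.TransitionRANGE])
}
```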
+
+//var TransitionserializationTypes struct {
+// EpsilonTransition int
+// RangeTransition int
+// RuleTransition int
+// PredicateTransition int
+// AtomTransition int
+// ActionTransition int
+// SetTransition int
+// NotSetTransition int
+// WildcardTransition int
+// PrecedencePredicateTransition int
+//}{
+// TransitionEPSILON,
+// TransitionRANGE,
+// TransitionRULE,
+// TransitionPREDICATE,
+// TransitionATOM,
+// TransitionACTION,
+// TransitionSET,
+// TransitionNOTSET,
+// TransitionWILDCARD,
+// TransitionPRECEDENCE
+//}
+
+// AtomTransition
+// TODO: make all transitions sets? no, should remove set edges
+type AtomTransition struct {
+ BaseTransition
+}
+
+func NewAtomTransition(target ATNState, intervalSet int) *AtomTransition {
+ t := &AtomTransition{
+ BaseTransition: BaseTransition{
+ target: target,
+ serializationType: TransitionATOM,
+ label: intervalSet,
+ isEpsilon: false,
+ },
+ }
+ t.intervalSet = t.makeLabel()
+
+ return t
+}
+
+func (t *AtomTransition) makeLabel() *IntervalSet {
+ s := NewIntervalSet()
+ s.addOne(t.label)
+ return s
+}
+
+func (t *AtomTransition) Matches(symbol, _, _ int) bool {
+ return t.label == symbol
+}
+
+func (t *AtomTransition) String() string {
+ return strconv.Itoa(t.label)
+}
+
+type RuleTransition struct {
+ BaseTransition
+ followState ATNState
+ ruleIndex, precedence int
+}
+
+func NewRuleTransition(ruleStart ATNState, ruleIndex, precedence int, followState ATNState) *RuleTransition {
+ return &RuleTransition{
+ BaseTransition: BaseTransition{
+ target: ruleStart,
+ isEpsilon: true,
+ serializationType: TransitionRULE,
+ },
+ ruleIndex: ruleIndex,
+ precedence: precedence,
+ followState: followState,
+ }
+}
+
+func (t *RuleTransition) Matches(_, _, _ int) bool {
+ return false
+}
+
+type EpsilonTransition struct {
+ BaseTransition
+ outermostPrecedenceReturn int
+}
+
+func NewEpsilonTransition(target ATNState, outermostPrecedenceReturn int) *EpsilonTransition {
+ return &EpsilonTransition{
+ BaseTransition: BaseTransition{
+ target: target,
+ serializationType: TransitionEPSILON,
+ isEpsilon: true,
+ },
+ outermostPrecedenceReturn: outermostPrecedenceReturn,
+ }
+}
+
+func (t *EpsilonTransition) Matches(_, _, _ int) bool {
+ return false
+}
+
+func (t *EpsilonTransition) String() string {
+ return "epsilon"
+}
+
+type RangeTransition struct {
+ BaseTransition
+ start, stop int
+}
+
+func NewRangeTransition(target ATNState, start, stop int) *RangeTransition {
+ t := &RangeTransition{
+ BaseTransition: BaseTransition{
+ target: target,
+ serializationType: TransitionRANGE,
+ isEpsilon: false,
+ },
+ start: start,
+ stop: stop,
+ }
+ t.intervalSet = t.makeLabel()
+ return t
+}
+
+func (t *RangeTransition) makeLabel() *IntervalSet {
+ s := NewIntervalSet()
+ s.addRange(t.start, t.stop)
+ return s
+}
+
+func (t *RangeTransition) Matches(symbol, _, _ int) bool {
+ return symbol >= t.start && symbol <= t.stop
+}
+
+func (t *RangeTransition) String() string {
+ var sb strings.Builder
+ sb.WriteByte('\'')
+ sb.WriteRune(rune(t.start))
+ sb.WriteString("'..'")
+ sb.WriteRune(rune(t.stop))
+ sb.WriteByte('\'')
+ return sb.String()
+}
+
+type AbstractPredicateTransition interface {
+ Transition
+ IAbstractPredicateTransitionFoo()
+}
+
+type BaseAbstractPredicateTransition struct {
+ BaseTransition
+}
+
+func NewBasePredicateTransition(target ATNState) *BaseAbstractPredicateTransition {
+ return &BaseAbstractPredicateTransition{
+ BaseTransition: BaseTransition{
+ target: target,
+ },
+ }
+}
+
+func (a *BaseAbstractPredicateTransition) IAbstractPredicateTransitionFoo() {}
+
+type PredicateTransition struct {
+ BaseAbstractPredicateTransition
+ isCtxDependent bool
+ ruleIndex, predIndex int
+}
+
+func NewPredicateTransition(target ATNState, ruleIndex, predIndex int, isCtxDependent bool) *PredicateTransition {
+ return &PredicateTransition{
+ BaseAbstractPredicateTransition: BaseAbstractPredicateTransition{
+ BaseTransition: BaseTransition{
+ target: target,
+ serializationType: TransitionPREDICATE,
+ isEpsilon: true,
+ },
+ },
+ isCtxDependent: isCtxDependent,
+ ruleIndex: ruleIndex,
+ predIndex: predIndex,
+ }
+}
+
+func (t *PredicateTransition) Matches(_, _, _ int) bool {
+ return false
+}
+
+func (t *PredicateTransition) getPredicate() *Predicate {
+ return NewPredicate(t.ruleIndex, t.predIndex, t.isCtxDependent)
+}
+
+func (t *PredicateTransition) String() string {
+ return "pred_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.predIndex)
+}
+
+type ActionTransition struct {
+ BaseTransition
+ isCtxDependent bool
+ ruleIndex, actionIndex, predIndex int
+}
+
+func NewActionTransition(target ATNState, ruleIndex, actionIndex int, isCtxDependent bool) *ActionTransition {
+ return &ActionTransition{
+ BaseTransition: BaseTransition{
+ target: target,
+ serializationType: TransitionACTION,
+ isEpsilon: true,
+ },
+ isCtxDependent: isCtxDependent,
+ ruleIndex: ruleIndex,
+ actionIndex: actionIndex,
+ }
+}
+
+func (t *ActionTransition) Matches(_, _, _ int) bool {
+ return false
+}
+
+func (t *ActionTransition) String() string {
+ return "action_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex)
+}
+
+type SetTransition struct {
+ BaseTransition
+}
+
+func NewSetTransition(target ATNState, set *IntervalSet) *SetTransition {
+ t := &SetTransition{
+ BaseTransition: BaseTransition{
+ target: target,
+ serializationType: TransitionSET,
+ },
+ }
+
+ if set != nil {
+ t.intervalSet = set
+ } else {
+ t.intervalSet = NewIntervalSet()
+ t.intervalSet.addOne(TokenInvalidType)
+ }
+ return t
+}
+
+func (t *SetTransition) Matches(symbol, _, _ int) bool {
+ return t.intervalSet.contains(symbol)
+}
+
+func (t *SetTransition) String() string {
+ return t.intervalSet.String()
+}
+
+type NotSetTransition struct {
+ SetTransition
+}
+
+func NewNotSetTransition(target ATNState, set *IntervalSet) *NotSetTransition {
+ t := &NotSetTransition{
+ SetTransition: SetTransition{
+ BaseTransition: BaseTransition{
+ target: target,
+ serializationType: TransitionNOTSET,
+ },
+ },
+ }
+ if set != nil {
+ t.intervalSet = set
+ } else {
+ t.intervalSet = NewIntervalSet()
+ t.intervalSet.addOne(TokenInvalidType)
+ }
+
+ return t
+}
+
+func (t *NotSetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
+ return symbol >= minVocabSymbol && symbol <= maxVocabSymbol && !t.intervalSet.contains(symbol)
+}
+
+func (t *NotSetTransition) String() string {
+ return "~" + t.intervalSet.String()
+}
+
+type WildcardTransition struct {
+ BaseTransition
+}
+
+func NewWildcardTransition(target ATNState) *WildcardTransition {
+ return &WildcardTransition{
+ BaseTransition: BaseTransition{
+ target: target,
+ serializationType: TransitionWILDCARD,
+ },
+ }
+}
+
+func (t *WildcardTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
+ return symbol >= minVocabSymbol && symbol <= maxVocabSymbol
+}
+
+func (t *WildcardTransition) String() string {
+ return "."
+}
+
+type PrecedencePredicateTransition struct {
+ BaseAbstractPredicateTransition
+ precedence int
+}
+
+func NewPrecedencePredicateTransition(target ATNState, precedence int) *PrecedencePredicateTransition {
+ return &PrecedencePredicateTransition{
+ BaseAbstractPredicateTransition: BaseAbstractPredicateTransition{
+ BaseTransition: BaseTransition{
+ target: target,
+ serializationType: TransitionPRECEDENCE,
+ isEpsilon: true,
+ },
+ },
+ precedence: precedence,
+ }
+}
+
+func (t *PrecedencePredicateTransition) Matches(_, _, _ int) bool {
+ return false
+}
+
+func (t *PrecedencePredicateTransition) getPredicate() *PrecedencePredicate {
+ return NewPrecedencePredicate(t.precedence)
+}
+
+func (t *PrecedencePredicateTransition) String() string {
+ return fmt.Sprint(t.precedence) + " >= _p"
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/tree.go b/vendor/github.com/antlr4-go/antlr/v4/tree.go
new file mode 100644
index 000000000..c288420fb
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/tree.go
@@ -0,0 +1,304 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+// The basic notion of a tree has a parent, a payload, and a list of children.
+// It is the most abstract interface for all the trees used by ANTLR.
+
+var TreeInvalidInterval = NewInterval(-1, -2)
+
+type Tree interface {
+ GetParent() Tree
+ SetParent(Tree)
+ GetPayload() interface{}
+ GetChild(i int) Tree
+ GetChildCount() int
+ GetChildren() []Tree
+}
+
+type SyntaxTree interface {
+ Tree
+ GetSourceInterval() Interval
+}
+
+type ParseTree interface {
+ SyntaxTree
+ Accept(Visitor ParseTreeVisitor) interface{}
+ GetText() string
+ ToStringTree([]string, Recognizer) string
+}
+
+type RuleNode interface {
+ ParseTree
+ GetRuleContext() RuleContext
+}
+
+type TerminalNode interface {
+ ParseTree
+ GetSymbol() Token
+}
+
+type ErrorNode interface {
+ TerminalNode
+
+ errorNode()
+}
+
+type ParseTreeVisitor interface {
+ Visit(tree ParseTree) interface{}
+ VisitChildren(node RuleNode) interface{}
+ VisitTerminal(node TerminalNode) interface{}
+ VisitErrorNode(node ErrorNode) interface{}
+}
+
+type BaseParseTreeVisitor struct{}
+
+var _ ParseTreeVisitor = &BaseParseTreeVisitor{}
+
+func (v *BaseParseTreeVisitor) Visit(tree ParseTree) interface{} { return tree.Accept(v) }
+func (v *BaseParseTreeVisitor) VisitChildren(_ RuleNode) interface{} { return nil }
+func (v *BaseParseTreeVisitor) VisitTerminal(_ TerminalNode) interface{} { return nil }
+func (v *BaseParseTreeVisitor) VisitErrorNode(_ ErrorNode) interface{} { return nil }
+
+// TODO: Implement this?
+//func (this ParseTreeVisitor) Visit(ctx) {
+// if (Utils.isArray(ctx)) {
+// self := this
+// return ctx.map(function(child) { return VisitAtom(self, child)})
+// } else {
+// return VisitAtom(this, ctx)
+// }
+//}
+//
+//func VisitAtom(Visitor, ctx) {
+// if (ctx.parser == nil) { //is terminal
+// return
+// }
+//
+// name := ctx.parser.ruleNames[ctx.ruleIndex]
+// funcName := "Visit" + Utils.titleCase(name)
+//
+// return Visitor[funcName](ctx)
+//}
+
+type ParseTreeListener interface {
+ VisitTerminal(node TerminalNode)
+ VisitErrorNode(node ErrorNode)
+ EnterEveryRule(ctx ParserRuleContext)
+ ExitEveryRule(ctx ParserRuleContext)
+}
+
+type BaseParseTreeListener struct{}
+
+var _ ParseTreeListener = &BaseParseTreeListener{}
+
+func (l *BaseParseTreeListener) VisitTerminal(_ TerminalNode) {}
+func (l *BaseParseTreeListener) VisitErrorNode(_ ErrorNode) {}
+func (l *BaseParseTreeListener) EnterEveryRule(_ ParserRuleContext) {}
+func (l *BaseParseTreeListener) ExitEveryRule(_ ParserRuleContext) {}
+
+type TerminalNodeImpl struct {
+ parentCtx RuleContext
+ symbol Token
+}
+
+var _ TerminalNode = &TerminalNodeImpl{}
+
+func NewTerminalNodeImpl(symbol Token) *TerminalNodeImpl {
+ tn := new(TerminalNodeImpl)
+
+ tn.parentCtx = nil
+ tn.symbol = symbol
+
+ return tn
+}
+
+func (t *TerminalNodeImpl) GetChild(_ int) Tree {
+ return nil
+}
+
+func (t *TerminalNodeImpl) GetChildren() []Tree {
+ return nil
+}
+
+func (t *TerminalNodeImpl) SetChildren(_ []Tree) {
+ panic("Cannot set children on terminal node")
+}
+
+func (t *TerminalNodeImpl) GetSymbol() Token {
+ return t.symbol
+}
+
+func (t *TerminalNodeImpl) GetParent() Tree {
+ return t.parentCtx
+}
+
+func (t *TerminalNodeImpl) SetParent(tree Tree) {
+ t.parentCtx = tree.(RuleContext)
+}
+
+func (t *TerminalNodeImpl) GetPayload() interface{} {
+ return t.symbol
+}
+
+func (t *TerminalNodeImpl) GetSourceInterval() Interval {
+ if t.symbol == nil {
+ return TreeInvalidInterval
+ }
+ tokenIndex := t.symbol.GetTokenIndex()
+ return NewInterval(tokenIndex, tokenIndex)
+}
+
+func (t *TerminalNodeImpl) GetChildCount() int {
+ return 0
+}
+
+func (t *TerminalNodeImpl) Accept(v ParseTreeVisitor) interface{} {
+ return v.VisitTerminal(t)
+}
+
+func (t *TerminalNodeImpl) GetText() string {
+ return t.symbol.GetText()
+}
+
+func (t *TerminalNodeImpl) String() string {
+ if t.symbol.GetTokenType() == TokenEOF {
+ return ""
+ }
+
+ return t.symbol.GetText()
+}
+
+func (t *TerminalNodeImpl) ToStringTree(_ []string, _ Recognizer) string {
+ return t.String()
+}
+
+// Represents a token that was consumed during re-synchronization
+// rather than during a valid Match operation. For example,
+// we will create this kind of a node during single token insertion
+// and deletion as well as during "consume until error recovery set"
+// upon no viable alternative exceptions.
+
+type ErrorNodeImpl struct {
+ *TerminalNodeImpl
+}
+
+var _ ErrorNode = &ErrorNodeImpl{}
+
+func NewErrorNodeImpl(token Token) *ErrorNodeImpl {
+ en := new(ErrorNodeImpl)
+ en.TerminalNodeImpl = NewTerminalNodeImpl(token)
+ return en
+}
+
+func (e *ErrorNodeImpl) errorNode() {}
+
+func (e *ErrorNodeImpl) Accept(v ParseTreeVisitor) interface{} {
+ return v.VisitErrorNode(e)
+}
+
+type ParseTreeWalker struct {
+}
+
+func NewParseTreeWalker() *ParseTreeWalker {
+ return new(ParseTreeWalker)
+}
+
+// Walk performs a walk on the given parse tree starting at the root and going down recursively
+// with depth-first search. On each node, [EnterRule] is called before
+// recursively walking down into child nodes, then [ExitRule] is called after the recursive call to wind up.
+func (p *ParseTreeWalker) Walk(listener ParseTreeListener, t Tree) {
+ switch tt := t.(type) {
+ case ErrorNode:
+ listener.VisitErrorNode(tt)
+ case TerminalNode:
+ listener.VisitTerminal(tt)
+ default:
+ p.EnterRule(listener, t.(RuleNode))
+ for i := 0; i < t.GetChildCount(); i++ {
+ child := t.GetChild(i)
+ p.Walk(listener, child)
+ }
+ p.ExitRule(listener, t.(RuleNode))
+ }
+}
+
+// EnterRule enters a grammar rule by first triggering the generic event [ParseTreeListener].[EnterEveryRule]
+// then by triggering the event specific to the given parse tree node
+func (p *ParseTreeWalker) EnterRule(listener ParseTreeListener, r RuleNode) {
+ ctx := r.GetRuleContext().(ParserRuleContext)
+ listener.EnterEveryRule(ctx)
+ ctx.EnterRule(listener)
+}
+
+// ExitRule exits a grammar rule by first triggering the event specific to the given parse tree node
+// then by triggering the generic event [ParseTreeListener].ExitEveryRule
+func (p *ParseTreeWalker) ExitRule(listener ParseTreeListener, r RuleNode) {
+ ctx := r.GetRuleContext().(ParserRuleContext)
+ ctx.ExitRule(listener)
+ listener.ExitEveryRule(ctx)
+}
+
+//goland:noinspection GoUnusedGlobalVariable
+var ParseTreeWalkerDefault = NewParseTreeWalker()
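The walker is driven through a ParseTreeListener, and BaseParseTreeListener lets a custom listener override only the callbacks it cares about. A sketch; the tree would come from a hypothetical generated parser:

```go
package walkdemo

import (
	"fmt"

	"github.com/antlr4-go/antlr/v4"
)

// ruleCounter embeds BaseParseTreeListener so only the event of interest
// needs overriding; the other callbacks fall through to the no-op defaults.
type ruleCounter struct {
	antlr.BaseParseTreeListener
	rules int
}

func (l *ruleCounter) EnterEveryRule(_ antlr.ParserRuleContext) { l.rules++ }

// countRules walks a parse tree depth-first and reports how many rule
// contexts were entered.
func countRules(tree antlr.Tree) {
	listener := &ruleCounter{}
	antlr.ParseTreeWalkerDefault.Walk(listener, tree)
	fmt.Println("rules entered:", listener.rules)
}
```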
+
+type IterativeParseTreeWalker struct {
+ *ParseTreeWalker
+}
+
+//goland:noinspection GoUnusedExportedFunction
+func NewIterativeParseTreeWalker() *IterativeParseTreeWalker {
+ return new(IterativeParseTreeWalker)
+}
+
+func (i *IterativeParseTreeWalker) Walk(listener ParseTreeListener, t Tree) {
+ var stack []Tree
+ var indexStack []int
+ currentNode := t
+ currentIndex := 0
+
+ for currentNode != nil {
+ // pre-order visit
+ switch tt := currentNode.(type) {
+ case ErrorNode:
+ listener.VisitErrorNode(tt)
+ case TerminalNode:
+ listener.VisitTerminal(tt)
+ default:
+ i.EnterRule(listener, currentNode.(RuleNode))
+ }
+ // Move down to first child, if exists
+ if currentNode.GetChildCount() > 0 {
+ stack = append(stack, currentNode)
+ indexStack = append(indexStack, currentIndex)
+ currentIndex = 0
+ currentNode = currentNode.GetChild(0)
+ continue
+ }
+
+ for {
+ // post-order visit
+ if ruleNode, ok := currentNode.(RuleNode); ok {
+ i.ExitRule(listener, ruleNode)
+ }
+ // No parent, so no siblings
+ if len(stack) == 0 {
+ currentNode = nil
+ currentIndex = 0
+ break
+ }
+ // Move to next sibling if possible
+ currentIndex++
+ if stack[len(stack)-1].GetChildCount() > currentIndex {
+ currentNode = stack[len(stack)-1].GetChild(currentIndex)
+ break
+ }
+ // No next sibling, so move up
+ currentNode, stack = stack[len(stack)-1], stack[:len(stack)-1]
+ currentIndex, indexStack = indexStack[len(indexStack)-1], indexStack[:len(indexStack)-1]
+ }
+ }
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/trees.go b/vendor/github.com/antlr4-go/antlr/v4/trees.go
new file mode 100644
index 000000000..f44c05d81
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/trees.go
@@ -0,0 +1,142 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import "fmt"
+
+// A set of utility routines useful for all kinds of ANTLR trees.
+
+// TreesStringTree prints out a whole tree in LISP form. [getNodeText] is used on the
+// node payloads to get the text for the nodes. Detects parse trees and extracts data appropriately.
+func TreesStringTree(tree Tree, ruleNames []string, recog Recognizer) string {
+
+ if recog != nil {
+ ruleNames = recog.GetRuleNames()
+ }
+
+ s := TreesGetNodeText(tree, ruleNames, nil)
+
+ s = EscapeWhitespace(s, false)
+ c := tree.GetChildCount()
+ if c == 0 {
+ return s
+ }
+ res := "(" + s + " "
+ if c > 0 {
+ s = TreesStringTree(tree.GetChild(0), ruleNames, nil)
+ res += s
+ }
+ for i := 1; i < c; i++ {
+ s = TreesStringTree(tree.GetChild(i), ruleNames, nil)
+ res += " " + s
+ }
+ res += ")"
+ return res
+}
+
+func TreesGetNodeText(t Tree, ruleNames []string, recog Parser) string {
+ if recog != nil {
+ ruleNames = recog.GetRuleNames()
+ }
+
+ if ruleNames != nil {
+ switch t2 := t.(type) {
+ case RuleNode:
+ t3 := t2.GetRuleContext()
+ altNumber := t3.GetAltNumber()
+
+ if altNumber != ATNInvalidAltNumber {
+ return fmt.Sprintf("%s:%d", ruleNames[t3.GetRuleIndex()], altNumber)
+ }
+ return ruleNames[t3.GetRuleIndex()]
+ case ErrorNode:
+ return fmt.Sprint(t2)
+ case TerminalNode:
+ if t2.GetSymbol() != nil {
+ return t2.GetSymbol().GetText()
+ }
+ }
+ }
+
+ // no recognition for rule names
+ payload := t.GetPayload()
+ if p2, ok := payload.(Token); ok {
+ return p2.GetText()
+ }
+
+ return fmt.Sprint(t.GetPayload())
+}
+
+// TreesGetChildren returns an ordered list of all children of this node
+//
+//goland:noinspection GoUnusedExportedFunction
+func TreesGetChildren(t Tree) []Tree {
+ list := make([]Tree, 0)
+ for i := 0; i < t.GetChildCount(); i++ {
+ list = append(list, t.GetChild(i))
+ }
+ return list
+}
+
+// TreesgetAncestors returns a list of all ancestors of this node. The first node of the list is the root
+// and the last node is the parent of this node.
+//
+//goland:noinspection GoUnusedExportedFunction
+func TreesgetAncestors(t Tree) []Tree {
+ ancestors := make([]Tree, 0)
+ t = t.GetParent()
+ for t != nil {
+ f := []Tree{t}
+ ancestors = append(f, ancestors...)
+ t = t.GetParent()
+ }
+ return ancestors
+}
+
+//goland:noinspection GoUnusedExportedFunction
+func TreesFindAllTokenNodes(t ParseTree, ttype int) []ParseTree {
+ return TreesfindAllNodes(t, ttype, true)
+}
+
+//goland:noinspection GoUnusedExportedFunction
+func TreesfindAllRuleNodes(t ParseTree, ruleIndex int) []ParseTree {
+ return TreesfindAllNodes(t, ruleIndex, false)
+}
+
+func TreesfindAllNodes(t ParseTree, index int, findTokens bool) []ParseTree {
+ nodes := make([]ParseTree, 0)
+ treesFindAllNodes(t, index, findTokens, &nodes)
+ return nodes
+}
+
+func treesFindAllNodes(t ParseTree, index int, findTokens bool, nodes *[]ParseTree) {
+ // check this node (the root) first
+
+ t2, ok := t.(TerminalNode)
+ t3, ok2 := t.(ParserRuleContext)
+
+ if findTokens && ok {
+ if t2.GetSymbol().GetTokenType() == index {
+ *nodes = append(*nodes, t2)
+ }
+ } else if !findTokens && ok2 {
+ if t3.GetRuleIndex() == index {
+ *nodes = append(*nodes, t3)
+ }
+ }
+ // check children
+ for i := 0; i < t.GetChildCount(); i++ {
+ treesFindAllNodes(t.GetChild(i).(ParseTree), index, findTokens, nodes)
+ }
+}
+
+//goland:noinspection GoUnusedExportedFunction
+func TreesDescendants(t ParseTree) []ParseTree {
+ nodes := []ParseTree{t}
+ for i := 0; i < t.GetChildCount(); i++ {
+ nodes = append(nodes, TreesDescendants(t.GetChild(i).(ParseTree))...)
+ }
+ return nodes
+}
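`TreesStringTree` renders a tree in LISP form: a childless node is just its text, while an interior node becomes its text followed by the recursively rendered children, wrapped in parentheses. Building a real ANTLR parse tree requires a generated lexer and parser, so the sketch below mirrors the same recursion on a hypothetical `node` type purely to illustrate the output shape:

```go
package main

import "fmt"

// node is a hypothetical stand-in for an ANTLR Tree: a text label plus children.
type node struct {
	label    string
	children []*node
}

// lisp mirrors the recursion in TreesStringTree: leaves render as their label,
// interior nodes as "(label child1 child2 ...)".
func lisp(n *node) string {
	if len(n.children) == 0 {
		return n.label
	}
	s := "(" + n.label
	for _, c := range n.children {
		s += " " + lisp(c)
	}
	return s + ")"
}

func main() {
	expr := &node{label: "expr", children: []*node{
		{label: "1"}, {label: "+"}, {label: "2"},
	}}
	fmt.Println(lisp(expr)) // prints: (expr 1 + 2)
}
```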
diff --git a/vendor/github.com/antlr4-go/antlr/v4/utils.go b/vendor/github.com/antlr4-go/antlr/v4/utils.go
new file mode 100644
index 000000000..733d7df9d
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/utils.go
@@ -0,0 +1,328 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "math/bits"
+ "os"
+ "strconv"
+ "strings"
+ "syscall"
+)
+
+func intMin(a, b int) int {
+ if a < b {
+ return a
+ }
+ return b
+}
+
+func intMax(a, b int) int {
+ if a > b {
+ return a
+ }
+ return b
+}
+
+// A simple integer stack
+
+type IntStack []int
+
+var ErrEmptyStack = errors.New("stack is empty")
+
+func (s *IntStack) Pop() (int, error) {
+ l := len(*s) - 1
+ if l < 0 {
+ return 0, ErrEmptyStack
+ }
+ v := (*s)[l]
+ *s = (*s)[0:l]
+ return v, nil
+}
+
+func (s *IntStack) Push(e int) {
+ *s = append(*s, e)
+}
+
+const bitsPerWord = 64
+
+func indexForBit(bit int) int {
+ return bit / bitsPerWord
+}
+
+//goland:noinspection GoUnusedExportedFunction,GoUnusedFunction
+func wordForBit(data []uint64, bit int) uint64 {
+ idx := indexForBit(bit)
+ if idx >= len(data) {
+ return 0
+ }
+ return data[idx]
+}
+
+func maskForBit(bit int) uint64 {
+ return uint64(1) << (bit % bitsPerWord)
+}
+
+func wordsNeeded(bit int) int {
+ return indexForBit(bit) + 1
+}
+
+type BitSet struct {
+ data []uint64
+}
+
+// NewBitSet creates a new bitwise set
+// TODO: See if we can replace with the standard library's BitSet
+func NewBitSet() *BitSet {
+ return &BitSet{}
+}
+
+func (b *BitSet) add(value int) {
+ idx := indexForBit(value)
+ if idx >= len(b.data) {
+ size := wordsNeeded(value)
+ data := make([]uint64, size)
+ copy(data, b.data)
+ b.data = data
+ }
+ b.data[idx] |= maskForBit(value)
+}
+
+func (b *BitSet) clear(index int) {
+ idx := indexForBit(index)
+ if idx >= len(b.data) {
+ return
+ }
+ b.data[idx] &= ^maskForBit(index)
+}
+
+func (b *BitSet) or(set *BitSet) {
+ // Get min size necessary to represent the bits in both sets.
+ bLen := b.minLen()
+ setLen := set.minLen()
+ maxLen := intMax(bLen, setLen)
+ if maxLen > len(b.data) {
+ // Increase the size of len(b.data) to represent the bits in both sets.
+ data := make([]uint64, maxLen)
+ copy(data, b.data)
+ b.data = data
+ }
+ // len(b.data) is at least setLen.
+ for i := 0; i < setLen; i++ {
+ b.data[i] |= set.data[i]
+ }
+}
+
+func (b *BitSet) remove(value int) {
+ b.clear(value)
+}
+
+func (b *BitSet) contains(value int) bool {
+ idx := indexForBit(value)
+ if idx >= len(b.data) {
+ return false
+ }
+ return (b.data[idx] & maskForBit(value)) != 0
+}
+
+func (b *BitSet) minValue() int {
+ for i, v := range b.data {
+ if v == 0 {
+ continue
+ }
+ return i*bitsPerWord + bits.TrailingZeros64(v)
+ }
+ return 2147483647 // math.MaxInt32: no bits are set
+}
+
+func (b *BitSet) equals(other interface{}) bool {
+ otherBitSet, ok := other.(*BitSet)
+ if !ok {
+ return false
+ }
+
+ if b == otherBitSet {
+ return true
+ }
+
+ // We only compare set bits, so we cannot rely on the two slices having the same size. It's
+ // possible for two BitSets to have different slice lengths but the same set bits. So we only
+ // compare the relevant words and ignore the trailing zeros.
+ bLen := b.minLen()
+ otherLen := otherBitSet.minLen()
+
+ if bLen != otherLen {
+ return false
+ }
+
+ for i := 0; i < bLen; i++ {
+ if b.data[i] != otherBitSet.data[i] {
+ return false
+ }
+ }
+
+ return true
+}
+
+func (b *BitSet) minLen() int {
+ for i := len(b.data); i > 0; i-- {
+ if b.data[i-1] != 0 {
+ return i
+ }
+ }
+ return 0
+}
+
+func (b *BitSet) length() int {
+ cnt := 0
+ for _, val := range b.data {
+ cnt += bits.OnesCount64(val)
+ }
+ return cnt
+}
+
+func (b *BitSet) String() string {
+ vals := make([]string, 0, b.length())
+
+ for i, v := range b.data {
+ for v != 0 {
+ n := bits.TrailingZeros64(v)
+ vals = append(vals, strconv.Itoa(i*bitsPerWord+n))
+ v &= ^(uint64(1) << n)
+ }
+ }
+
+ return "{" + strings.Join(vals, ", ") + "}"
+}
+
+type AltDict struct {
+ data map[string]interface{}
+}
+
+func NewAltDict() *AltDict {
+ d := new(AltDict)
+ d.data = make(map[string]interface{})
+ return d
+}
+
+func (a *AltDict) Get(key string) interface{} {
+ key = "k-" + key
+ return a.data[key]
+}
+
+func (a *AltDict) put(key string, value interface{}) {
+ key = "k-" + key
+ a.data[key] = value
+}
+
+func (a *AltDict) values() []interface{} {
+ vs := make([]interface{}, len(a.data))
+ i := 0
+ for _, v := range a.data {
+ vs[i] = v
+ i++
+ }
+ return vs
+}
+
+func EscapeWhitespace(s string, escapeSpaces bool) string {
+
+ s = strings.Replace(s, "\t", "\\t", -1)
+ s = strings.Replace(s, "\n", "\\n", -1)
+ s = strings.Replace(s, "\r", "\\r", -1)
+ if escapeSpaces {
+ s = strings.Replace(s, " ", "\u00B7", -1)
+ }
+ return s
+}
+
+//goland:noinspection GoUnusedExportedFunction
+func TerminalNodeToStringArray(sa []TerminalNode) []string {
+ st := make([]string, len(sa))
+
+ for i, s := range sa {
+ st[i] = fmt.Sprintf("%v", s)
+ }
+
+ return st
+}
+
+//goland:noinspection GoUnusedExportedFunction
+func PrintArrayJavaStyle(sa []string) string {
+ var buffer bytes.Buffer
+
+ buffer.WriteString("[")
+
+ for i, s := range sa {
+ buffer.WriteString(s)
+ if i != len(sa)-1 {
+ buffer.WriteString(", ")
+ }
+ }
+
+ buffer.WriteString("]")
+
+ return buffer.String()
+}
+
+// MurmurHash3 (32-bit) mixing helpers
+func murmurInit(seed int) int {
+ return seed
+}
+
+func murmurUpdate(h int, value int) int {
+ const c1 uint32 = 0xCC9E2D51
+ const c2 uint32 = 0x1B873593
+ const r1 uint32 = 15
+ const r2 uint32 = 13
+ const m uint32 = 5
+ const n uint32 = 0xE6546B64
+
+ k := uint32(value)
+ k *= c1
+ k = (k << r1) | (k >> (32 - r1))
+ k *= c2
+
+ hash := uint32(h) ^ k
+ hash = (hash << r2) | (hash >> (32 - r2))
+ hash = hash*m + n
+ return int(hash)
+}
+
+func murmurFinish(h int, numberOfWords int) int {
+ var hash = uint32(h)
+ hash ^= uint32(numberOfWords) << 2
+ hash ^= hash >> 16
+ hash *= 0x85ebca6b
+ hash ^= hash >> 13
+ hash *= 0xc2b2ae35
+ hash ^= hash >> 16
+
+ return int(hash)
+}
+
+func isDirectory(dir string) (bool, error) {
+ fileInfo, err := os.Stat(dir)
+ if err != nil {
+ switch {
+ case errors.Is(err, syscall.ENOENT):
+ // The given directory does not exist, so we will try to create it
+ //
+ err = os.MkdirAll(dir, 0755)
+ if err != nil {
+ return false, err
+ }
+
+ return true, nil
+ case err != nil:
+ return false, err
+ default:
+ }
+ }
+ return fileInfo.IsDir(), err
+}
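Of the helpers above, `IntStack`, `ErrEmptyStack`, and `EscapeWhitespace` are exported, while `BitSet`, `AltDict`, and the murmur functions stay internal to the package. A minimal usage sketch against the vendored import path:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/antlr4-go/antlr/v4"
)

func main() {
	// IntStack is a plain slice-backed stack of ints.
	var s antlr.IntStack
	s.Push(1)
	s.Push(2)
	v, err := s.Pop()
	fmt.Println(v, err) // 2 <nil>

	// Popping an empty stack returns ErrEmptyStack instead of panicking.
	s = nil
	if _, err := s.Pop(); errors.Is(err, antlr.ErrEmptyStack) {
		fmt.Println("stack is empty")
	}

	// EscapeWhitespace makes tab/newline/CR visible as \t, \n, \r;
	// passing true additionally replaces spaces with U+00B7.
	fmt.Println(antlr.EscapeWhitespace("a\tb\nc", false)) // a\tb\nc, with literal backslashes
}
```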
diff --git a/vendor/github.com/go-logr/logr/funcr/funcr.go b/vendor/github.com/go-logr/logr/funcr/funcr.go
new file mode 100644
index 000000000..30568e768
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/funcr/funcr.go
@@ -0,0 +1,914 @@
+/*
+Copyright 2021 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package funcr implements formatting of structured log messages and
+// optionally captures the call site and timestamp.
+//
+// The simplest way to use it is via its implementation of a
+// github.com/go-logr/logr.LogSink with output through an arbitrary
+// "write" function. See New and NewJSON for details.
+//
+// # Custom LogSinks
+//
+// For users who need more control, a funcr.Formatter can be embedded inside
+// your own custom LogSink implementation. This is useful when the LogSink
+// needs to implement additional methods, for example.
+//
+// # Formatting
+//
+// This will respect logr.Marshaler, fmt.Stringer, and error interfaces for
+// values which are being logged. When rendering a struct, funcr will use Go's
+// standard JSON tags (all except "string").
+package funcr
+
+import (
+ "bytes"
+ "encoding"
+ "encoding/json"
+ "fmt"
+ "path/filepath"
+ "reflect"
+ "runtime"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/go-logr/logr"
+)
+
+// New returns a logr.Logger which is implemented by an arbitrary function.
+func New(fn func(prefix, args string), opts Options) logr.Logger {
+ return logr.New(newSink(fn, NewFormatter(opts)))
+}
+
+// NewJSON returns a logr.Logger which is implemented by an arbitrary function
+// and produces JSON output.
+func NewJSON(fn func(obj string), opts Options) logr.Logger {
+ fnWrapper := func(_, obj string) {
+ fn(obj)
+ }
+ return logr.New(newSink(fnWrapper, NewFormatterJSON(opts)))
+}
+
+// Underlier exposes access to the underlying logging function. Since
+// callers only have a logr.Logger, they have to know which
+// implementation is in use, so this interface is less of an
+// abstraction and more of a way to test type conversion.
+type Underlier interface {
+ GetUnderlying() func(prefix, args string)
+}
+
+func newSink(fn func(prefix, args string), formatter Formatter) logr.LogSink {
+ l := &fnlogger{
+ Formatter: formatter,
+ write: fn,
+ }
+ // For skipping fnlogger.Info and fnlogger.Error.
+ l.Formatter.AddCallDepth(1)
+ return l
+}
+
+// Options carries parameters which influence the way logs are generated.
+type Options struct {
+ // LogCaller tells funcr to add a "caller" key to some or all log lines.
+ // This has some overhead, so some users might not want it.
+ LogCaller MessageClass
+
+ // LogCallerFunc tells funcr to also log the calling function name. This
+ // has no effect if caller logging is not enabled (see Options.LogCaller).
+ LogCallerFunc bool
+
+ // LogTimestamp tells funcr to add a "ts" key to log lines. This has some
+ // overhead, so some users might not want it.
+ LogTimestamp bool
+
+ // TimestampFormat tells funcr how to render timestamps when LogTimestamp
+ // is enabled. If not specified, a default format will be used. For more
+ // details, see docs for Go's time.Layout.
+ TimestampFormat string
+
+ // LogInfoLevel tells funcr what key to use to log the info level.
+ // If not specified, the info level will be logged as "level".
+ // If this is set to "", the info level will not be logged at all.
+ LogInfoLevel *string
+
+ // Verbosity tells funcr which V logs to produce. Higher values enable
+ // more logs. Info logs at or below this level will be written, while logs
+ // above this level will be discarded.
+ Verbosity int
+
+ // RenderBuiltinsHook allows users to mutate the list of key-value pairs
+ // while a log line is being rendered. The kvList argument follows logr
+ // conventions - each pair of slice elements is comprised of a string key
+ // and an arbitrary value (verified and sanitized before calling this
+ // hook). The value returned must follow the same conventions. This hook
+ // can be used to audit or modify logged data. For example, you might want
+ // to prefix all of funcr's built-in keys with some string. This hook is
+ // only called for built-in (provided by funcr itself) key-value pairs.
+ // Equivalent hooks are offered for key-value pairs saved via
+ // logr.Logger.WithValues or Formatter.AddValues (see RenderValuesHook) and
+ // for user-provided pairs (see RenderArgsHook).
+ RenderBuiltinsHook func(kvList []any) []any
+
+ // RenderValuesHook is the same as RenderBuiltinsHook, except that it is
+ // only called for key-value pairs saved via logr.Logger.WithValues. See
+ // RenderBuiltinsHook for more details.
+ RenderValuesHook func(kvList []any) []any
+
+ // RenderArgsHook is the same as RenderBuiltinsHook, except that it is only
+ // called for key-value pairs passed directly to Info and Error. See
+ // RenderBuiltinsHook for more details.
+ RenderArgsHook func(kvList []any) []any
+
+ // MaxLogDepth tells funcr how many levels of nested fields (e.g. a struct
+ // that contains a struct, etc.) it may log. Every time it finds a struct,
+ // slice, array, or map the depth is increased by one. When the maximum is
+ // reached, the value will be converted to a string indicating that the max
+ // depth has been exceeded. If this field is not specified, a default
+ // value will be used.
+ MaxLogDepth int
+}
+
+// MessageClass indicates which category or categories of messages to consider.
+type MessageClass int
+
+const (
+ // None ignores all message classes.
+ None MessageClass = iota
+ // All considers all message classes.
+ All
+ // Info only considers info messages.
+ Info
+ // Error only considers error messages.
+ Error
+)
+
+// fnlogger inherits some of its LogSink implementation from Formatter
+// and just needs to add some glue code.
+type fnlogger struct {
+ Formatter
+ write func(prefix, args string)
+}
+
+func (l fnlogger) WithName(name string) logr.LogSink {
+ l.Formatter.AddName(name)
+ return &l
+}
+
+func (l fnlogger) WithValues(kvList ...any) logr.LogSink {
+ l.Formatter.AddValues(kvList)
+ return &l
+}
+
+func (l fnlogger) WithCallDepth(depth int) logr.LogSink {
+ l.Formatter.AddCallDepth(depth)
+ return &l
+}
+
+func (l fnlogger) Info(level int, msg string, kvList ...any) {
+ prefix, args := l.FormatInfo(level, msg, kvList)
+ l.write(prefix, args)
+}
+
+func (l fnlogger) Error(err error, msg string, kvList ...any) {
+ prefix, args := l.FormatError(err, msg, kvList)
+ l.write(prefix, args)
+}
+
+func (l fnlogger) GetUnderlying() func(prefix, args string) {
+ return l.write
+}
+
+// Assert conformance to the interfaces.
+var _ logr.LogSink = &fnlogger{}
+var _ logr.CallDepthLogSink = &fnlogger{}
+var _ Underlier = &fnlogger{}
+
+// NewFormatter constructs a Formatter which emits a JSON-like key=value format.
+func NewFormatter(opts Options) Formatter {
+ return newFormatter(opts, outputKeyValue)
+}
+
+// NewFormatterJSON constructs a Formatter which emits strict JSON.
+func NewFormatterJSON(opts Options) Formatter {
+ return newFormatter(opts, outputJSON)
+}
+
+// Defaults for Options.
+const defaultTimestampFormat = "2006-01-02 15:04:05.000000"
+const defaultMaxLogDepth = 16
+
+func newFormatter(opts Options, outfmt outputFormat) Formatter {
+ if opts.TimestampFormat == "" {
+ opts.TimestampFormat = defaultTimestampFormat
+ }
+ if opts.MaxLogDepth == 0 {
+ opts.MaxLogDepth = defaultMaxLogDepth
+ }
+ if opts.LogInfoLevel == nil {
+ opts.LogInfoLevel = new(string)
+ *opts.LogInfoLevel = "level"
+ }
+ f := Formatter{
+ outputFormat: outfmt,
+ prefix: "",
+ values: nil,
+ depth: 0,
+ opts: &opts,
+ }
+ return f
+}
+
+// Formatter is an opaque struct which can be embedded in a LogSink
+// implementation. It should be constructed with NewFormatter. Some of
+// its methods directly implement logr.LogSink.
+type Formatter struct {
+ outputFormat outputFormat
+ prefix string
+ values []any
+ valuesStr string
+ depth int
+ opts *Options
+ groupName string // for slog groups
+ groups []groupDef
+}
+
+// outputFormat indicates which outputFormat to use.
+type outputFormat int
+
+const (
+ // outputKeyValue emits a JSON-like key=value format, but not strict JSON.
+ outputKeyValue outputFormat = iota
+ // outputJSON emits strict JSON.
+ outputJSON
+)
+
+// groupDef represents a saved group. The values may be empty, but we don't
+// know if we need to render the group until the final record is rendered.
+type groupDef struct {
+ name string
+ values string
+}
+
+// PseudoStruct is a list of key-value pairs that gets logged as a struct.
+type PseudoStruct []any
+
+// render produces a log line, ready to use.
+func (f Formatter) render(builtins, args []any) string {
+ // Empirically bytes.Buffer is faster than strings.Builder for this.
+ buf := bytes.NewBuffer(make([]byte, 0, 1024))
+
+ if f.outputFormat == outputJSON {
+ buf.WriteByte('{') // for the whole record
+ }
+
+ // Render builtins
+ vals := builtins
+ if hook := f.opts.RenderBuiltinsHook; hook != nil {
+ vals = hook(f.sanitize(vals))
+ }
+ f.flatten(buf, vals, false) // keys are ours, no need to escape
+ continuing := len(builtins) > 0
+
+ // Turn the inner-most group into a string
+ argsStr := func() string {
+ buf := bytes.NewBuffer(make([]byte, 0, 1024))
+
+ vals = args
+ if hook := f.opts.RenderArgsHook; hook != nil {
+ vals = hook(f.sanitize(vals))
+ }
+ f.flatten(buf, vals, true) // escape user-provided keys
+
+ return buf.String()
+ }()
+
+ // Render the stack of groups from the inside out.
+ bodyStr := f.renderGroup(f.groupName, f.valuesStr, argsStr)
+ for i := len(f.groups) - 1; i >= 0; i-- {
+ grp := &f.groups[i]
+ if grp.values == "" && bodyStr == "" {
+ // no contents, so we must elide the whole group
+ continue
+ }
+ bodyStr = f.renderGroup(grp.name, grp.values, bodyStr)
+ }
+
+ if bodyStr != "" {
+ if continuing {
+ buf.WriteByte(f.comma())
+ }
+ buf.WriteString(bodyStr)
+ }
+
+ if f.outputFormat == outputJSON {
+ buf.WriteByte('}') // for the whole record
+ }
+
+ return buf.String()
+}
+
+// renderGroup returns a string representation of the named group with rendered
+// values and args. If the name is empty, this will return the values and args,
+// joined. If the name is not empty, this will return a single key-value pair,
+// where the value is a grouping of the values and args. If the values and
+// args are both empty, this will return an empty string, even if the name was
+// specified.
+func (f Formatter) renderGroup(name string, values string, args string) string {
+ buf := bytes.NewBuffer(make([]byte, 0, 1024))
+
+ needClosingBrace := false
+ if name != "" && (values != "" || args != "") {
+ buf.WriteString(f.quoted(name, true)) // escape user-provided keys
+ buf.WriteByte(f.colon())
+ buf.WriteByte('{')
+ needClosingBrace = true
+ }
+
+ continuing := false
+ if values != "" {
+ buf.WriteString(values)
+ continuing = true
+ }
+
+ if args != "" {
+ if continuing {
+ buf.WriteByte(f.comma())
+ }
+ buf.WriteString(args)
+ }
+
+ if needClosingBrace {
+ buf.WriteByte('}')
+ }
+
+ return buf.String()
+}
+
+// flatten renders a list of key-value pairs into a buffer. If escapeKeys is
+// true, the keys are assumed to have non-JSON-compatible characters in them
+// and must be evaluated for escapes.
+//
+// This function returns a potentially modified version of kvList, which
+// ensures that there is a value for every key (adding a value if needed) and
+// that each key is a string (substituting a key if needed).
+func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, escapeKeys bool) []any {
+ // This logic overlaps with sanitize() but saves one type-cast per key,
+ // which can be measurable.
+ if len(kvList)%2 != 0 {
+ kvList = append(kvList, noValue)
+ }
+ copied := false
+ for i := 0; i < len(kvList); i += 2 {
+ k, ok := kvList[i].(string)
+ if !ok {
+ if !copied {
+ newList := make([]any, len(kvList))
+ copy(newList, kvList)
+ kvList = newList
+ copied = true
+ }
+ k = f.nonStringKey(kvList[i])
+ kvList[i] = k
+ }
+ v := kvList[i+1]
+
+ if i > 0 {
+ if f.outputFormat == outputJSON {
+ buf.WriteByte(f.comma())
+ } else {
+ // In theory the format could be something we don't understand. In
+ // practice, we control it, so it won't be.
+ buf.WriteByte(' ')
+ }
+ }
+
+ buf.WriteString(f.quoted(k, escapeKeys))
+ buf.WriteByte(f.colon())
+ buf.WriteString(f.pretty(v))
+ }
+ return kvList
+}
+
+func (f Formatter) quoted(str string, escape bool) string {
+ if escape {
+ return prettyString(str)
+ }
+ // this is faster
+ return `"` + str + `"`
+}
+
+func (f Formatter) comma() byte {
+ if f.outputFormat == outputJSON {
+ return ','
+ }
+ return ' '
+}
+
+func (f Formatter) colon() byte {
+ if f.outputFormat == outputJSON {
+ return ':'
+ }
+ return '='
+}
+
+func (f Formatter) pretty(value any) string {
+ return f.prettyWithFlags(value, 0, 0)
+}
+
+const (
+ flagRawStruct = 0x1 // do not print braces on structs
+)
+
+// TODO: This is not fast. Most of the overhead goes here.
+func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
+ if depth > f.opts.MaxLogDepth {
+ return `"<max-log-depth-exceeded>"`
+ }
+
+ // Handle types that take full control of logging.
+ if v, ok := value.(logr.Marshaler); ok {
+ // Replace the value with what the type wants to get logged.
+ // That then gets handled below via reflection.
+ value = invokeMarshaler(v)
+ }
+
+ // Handle types that want to format themselves.
+ switch v := value.(type) {
+ case fmt.Stringer:
+ value = invokeStringer(v)
+ case error:
+ value = invokeError(v)
+ }
+
+ // Handling the most common types without reflect is a small perf win.
+ switch v := value.(type) {
+ case bool:
+ return strconv.FormatBool(v)
+ case string:
+ return prettyString(v)
+ case int:
+ return strconv.FormatInt(int64(v), 10)
+ case int8:
+ return strconv.FormatInt(int64(v), 10)
+ case int16:
+ return strconv.FormatInt(int64(v), 10)
+ case int32:
+ return strconv.FormatInt(int64(v), 10)
+ case int64:
+ return strconv.FormatInt(int64(v), 10)
+ case uint:
+ return strconv.FormatUint(uint64(v), 10)
+ case uint8:
+ return strconv.FormatUint(uint64(v), 10)
+ case uint16:
+ return strconv.FormatUint(uint64(v), 10)
+ case uint32:
+ return strconv.FormatUint(uint64(v), 10)
+ case uint64:
+ return strconv.FormatUint(v, 10)
+ case uintptr:
+ return strconv.FormatUint(uint64(v), 10)
+ case float32:
+ return strconv.FormatFloat(float64(v), 'f', -1, 32)
+ case float64:
+ return strconv.FormatFloat(v, 'f', -1, 64)
+ case complex64:
+ return `"` + strconv.FormatComplex(complex128(v), 'f', -1, 64) + `"`
+ case complex128:
+ return `"` + strconv.FormatComplex(v, 'f', -1, 128) + `"`
+ case PseudoStruct:
+ buf := bytes.NewBuffer(make([]byte, 0, 1024))
+ v = f.sanitize(v)
+ if flags&flagRawStruct == 0 {
+ buf.WriteByte('{')
+ }
+ for i := 0; i < len(v); i += 2 {
+ if i > 0 {
+ buf.WriteByte(f.comma())
+ }
+ k, _ := v[i].(string) // sanitize() above means no need to check success
+ // arbitrary keys might need escaping
+ buf.WriteString(prettyString(k))
+ buf.WriteByte(f.colon())
+ buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1))
+ }
+ if flags&flagRawStruct == 0 {
+ buf.WriteByte('}')
+ }
+ return buf.String()
+ }
+
+ buf := bytes.NewBuffer(make([]byte, 0, 256))
+ t := reflect.TypeOf(value)
+ if t == nil {
+ return "null"
+ }
+ v := reflect.ValueOf(value)
+ switch t.Kind() {
+ case reflect.Bool:
+ return strconv.FormatBool(v.Bool())
+ case reflect.String:
+ return prettyString(v.String())
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return strconv.FormatInt(int64(v.Int()), 10)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return strconv.FormatUint(uint64(v.Uint()), 10)
+ case reflect.Float32:
+ return strconv.FormatFloat(float64(v.Float()), 'f', -1, 32)
+ case reflect.Float64:
+ return strconv.FormatFloat(v.Float(), 'f', -1, 64)
+ case reflect.Complex64:
+ return `"` + strconv.FormatComplex(complex128(v.Complex()), 'f', -1, 64) + `"`
+ case reflect.Complex128:
+ return `"` + strconv.FormatComplex(v.Complex(), 'f', -1, 128) + `"`
+ case reflect.Struct:
+ if flags&flagRawStruct == 0 {
+ buf.WriteByte('{')
+ }
+ printComma := false // testing i>0 is not enough because of JSON omitted fields
+ for i := 0; i < t.NumField(); i++ {
+ fld := t.Field(i)
+ if fld.PkgPath != "" {
+ // reflect says this field is only defined for non-exported fields.
+ continue
+ }
+ if !v.Field(i).CanInterface() {
+ // reflect isn't clear exactly what this means, but we can't use it.
+ continue
+ }
+ name := ""
+ omitempty := false
+ if tag, found := fld.Tag.Lookup("json"); found {
+ if tag == "-" {
+ continue
+ }
+ if comma := strings.Index(tag, ","); comma != -1 {
+ if n := tag[:comma]; n != "" {
+ name = n
+ }
+ rest := tag[comma:]
+ if strings.Contains(rest, ",omitempty,") || strings.HasSuffix(rest, ",omitempty") {
+ omitempty = true
+ }
+ } else {
+ name = tag
+ }
+ }
+ if omitempty && isEmpty(v.Field(i)) {
+ continue
+ }
+ if printComma {
+ buf.WriteByte(f.comma())
+ }
+ printComma = true // if we got here, we are rendering a field
+ if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" {
+ buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), flags|flagRawStruct, depth+1))
+ continue
+ }
+ if name == "" {
+ name = fld.Name
+ }
+ // field names can't contain characters which need escaping
+ buf.WriteString(f.quoted(name, false))
+ buf.WriteByte(f.colon())
+ buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), 0, depth+1))
+ }
+ if flags&flagRawStruct == 0 {
+ buf.WriteByte('}')
+ }
+ return buf.String()
+ case reflect.Slice, reflect.Array:
+ // If this is outputting as JSON, make sure this isn't really a json.RawMessage.
+ // If so, just emit it as-is and don't pretty-print it, as that would just render
+ // it as [X,Y,Z,...], which isn't terribly useful vs the string form you really want.
+ if f.outputFormat == outputJSON {
+ if rm, ok := value.(json.RawMessage); ok {
+ // If it's empty make sure we emit an empty value as the array style would below.
+ if len(rm) > 0 {
+ buf.Write(rm)
+ } else {
+ buf.WriteString("null")
+ }
+ return buf.String()
+ }
+ }
+ buf.WriteByte('[')
+ for i := 0; i < v.Len(); i++ {
+ if i > 0 {
+ buf.WriteByte(f.comma())
+ }
+ e := v.Index(i)
+ buf.WriteString(f.prettyWithFlags(e.Interface(), 0, depth+1))
+ }
+ buf.WriteByte(']')
+ return buf.String()
+ case reflect.Map:
+ buf.WriteByte('{')
+ // This does not sort the map keys, for best perf.
+ it := v.MapRange()
+ i := 0
+ for it.Next() {
+ if i > 0 {
+ buf.WriteByte(f.comma())
+ }
+ // If a map key supports TextMarshaler, use it.
+ keystr := ""
+ if m, ok := it.Key().Interface().(encoding.TextMarshaler); ok {
+ txt, err := m.MarshalText()
+ if err != nil {
+ keystr = fmt.Sprintf("<error-MarshalText: %s>", err.Error())
+ } else {
+ keystr = string(txt)
+ }
+ keystr = prettyString(keystr)
+ } else {
+ // prettyWithFlags will produce already-escaped values
+ keystr = f.prettyWithFlags(it.Key().Interface(), 0, depth+1)
+ if t.Key().Kind() != reflect.String {
+ // JSON only does string keys. Unlike Go's standard JSON, we'll
+ // convert just about anything to a string.
+ keystr = prettyString(keystr)
+ }
+ }
+ buf.WriteString(keystr)
+ buf.WriteByte(f.colon())
+ buf.WriteString(f.prettyWithFlags(it.Value().Interface(), 0, depth+1))
+ i++
+ }
+ buf.WriteByte('}')
+ return buf.String()
+ case reflect.Ptr, reflect.Interface:
+ if v.IsNil() {
+ return "null"
+ }
+ return f.prettyWithFlags(v.Elem().Interface(), 0, depth)
+ }
+ return fmt.Sprintf(`"<unhandled-%s>"`, t.Kind().String())
+}
+
+func prettyString(s string) string {
+ // Avoid escaping (which does allocations) if we can.
+ if needsEscape(s) {
+ return strconv.Quote(s)
+ }
+ b := bytes.NewBuffer(make([]byte, 0, 1024))
+ b.WriteByte('"')
+ b.WriteString(s)
+ b.WriteByte('"')
+ return b.String()
+}
+
+// needsEscape determines whether the input string needs to be escaped or not,
+// without doing any allocations.
+func needsEscape(s string) bool {
+ for _, r := range s {
+ if !strconv.IsPrint(r) || r == '\\' || r == '"' {
+ return true
+ }
+ }
+ return false
+}
+
+func isEmpty(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Complex64, reflect.Complex128:
+ return v.Complex() == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ }
+ return false
+}
+
+func invokeMarshaler(m logr.Marshaler) (ret any) {
+ defer func() {
+ if r := recover(); r != nil {
+ ret = fmt.Sprintf("<panic: %s>", r)
+ }
+ }()
+ return m.MarshalLog()
+}
+
+func invokeStringer(s fmt.Stringer) (ret string) {
+ defer func() {
+ if r := recover(); r != nil {
+ ret = fmt.Sprintf("<panic: %s>", r)
+ }
+ }()
+ return s.String()
+}
+
+func invokeError(e error) (ret string) {
+ defer func() {
+ if r := recover(); r != nil {
+ ret = fmt.Sprintf("<panic: %s>", r)
+ }
+ }()
+ return e.Error()
+}
+
+// Caller represents the original call site for a log line, after considering
+// logr.Logger.WithCallDepth and logr.Logger.WithCallStackHelper. The File and
+// Line fields will always be provided, while the Func field is optional.
+// Users can set the render hook fields in Options to examine logged key-value
+// pairs, one of which will be {"caller", Caller} if the Options.LogCaller
+// field is enabled for the given MessageClass.
+type Caller struct {
+ // File is the basename of the file for this call site.
+ File string `json:"file"`
+ // Line is the line number in the file for this call site.
+ Line int `json:"line"`
+ // Func is the function name for this call site, or empty if
+ // Options.LogCallerFunc is not enabled.
+ Func string `json:"function,omitempty"`
+}
+
+func (f Formatter) caller() Caller {
+ // +1 for this frame, +1 for Info/Error.
+ pc, file, line, ok := runtime.Caller(f.depth + 2)
+ if !ok {
+ return Caller{"", 0, ""}
+ }
+ fn := ""
+ if f.opts.LogCallerFunc {
+ if fp := runtime.FuncForPC(pc); fp != nil {
+ fn = fp.Name()
+ }
+ }
+
+ return Caller{filepath.Base(file), line, fn}
+}
+
+const noValue = "<no-value>"
+
+func (f Formatter) nonStringKey(v any) string {
+ return fmt.Sprintf("<non-string-key: %s>", f.snippet(v))
+}
+
+// snippet produces a short snippet string of an arbitrary value.
+func (f Formatter) snippet(v any) string {
+ const snipLen = 16
+
+ snip := f.pretty(v)
+ if len(snip) > snipLen {
+ snip = snip[:snipLen]
+ }
+ return snip
+}
+
+// sanitize ensures that a list of key-value pairs has a value for every key
+// (adding a value if needed) and that each key is a string (substituting a key
+// if needed).
+func (f Formatter) sanitize(kvList []any) []any {
+ if len(kvList)%2 != 0 {
+ kvList = append(kvList, noValue)
+ }
+ for i := 0; i < len(kvList); i += 2 {
+ _, ok := kvList[i].(string)
+ if !ok {
+ kvList[i] = f.nonStringKey(kvList[i])
+ }
+ }
+ return kvList
+}
+
+// startGroup opens a new group scope (basically a sub-struct), which locks all
+// the current saved values and starts them anew. This is needed to satisfy
+// slog.
+func (f *Formatter) startGroup(name string) {
+ // Unnamed groups are just inlined.
+ if name == "" {
+ return
+ }
+
+ n := len(f.groups)
+ f.groups = append(f.groups[:n:n], groupDef{f.groupName, f.valuesStr})
+
+ // Start collecting new values.
+ f.groupName = name
+ f.valuesStr = ""
+ f.values = nil
+}
+
+// Init configures this Formatter from runtime info, such as the call depth
+// imposed by logr itself.
+// Note that this receiver is a pointer, so depth can be saved.
+func (f *Formatter) Init(info logr.RuntimeInfo) {
+ f.depth += info.CallDepth
+}
+
+// Enabled checks whether an info message at the given level should be logged.
+func (f Formatter) Enabled(level int) bool {
+ return level <= f.opts.Verbosity
+}
+
+// GetDepth returns the current depth of this Formatter. This is useful for
+// implementations which do their own caller attribution.
+func (f Formatter) GetDepth() int {
+ return f.depth
+}
+
+// FormatInfo renders an Info log message into strings. The prefix will be
+// empty when no names were set (via AddNames), or when the output is
+// configured for JSON.
+func (f Formatter) FormatInfo(level int, msg string, kvList []any) (prefix, argsStr string) {
+ args := make([]any, 0, 64) // using a constant here impacts perf
+ prefix = f.prefix
+ if f.outputFormat == outputJSON {
+ args = append(args, "logger", prefix)
+ prefix = ""
+ }
+ if f.opts.LogTimestamp {
+ args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat))
+ }
+ if policy := f.opts.LogCaller; policy == All || policy == Info {
+ args = append(args, "caller", f.caller())
+ }
+ if key := *f.opts.LogInfoLevel; key != "" {
+ args = append(args, key, level)
+ }
+ args = append(args, "msg", msg)
+ return prefix, f.render(args, kvList)
+}
+
+// FormatError renders an Error log message into strings. The prefix will be
+// empty when no names were set (via AddNames), or when the output is
+// configured for JSON.
+func (f Formatter) FormatError(err error, msg string, kvList []any) (prefix, argsStr string) {
+ args := make([]any, 0, 64) // using a constant here impacts perf
+ prefix = f.prefix
+ if f.outputFormat == outputJSON {
+ args = append(args, "logger", prefix)
+ prefix = ""
+ }
+ if f.opts.LogTimestamp {
+ args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat))
+ }
+ if policy := f.opts.LogCaller; policy == All || policy == Error {
+ args = append(args, "caller", f.caller())
+ }
+ args = append(args, "msg", msg)
+ var loggableErr any
+ if err != nil {
+ loggableErr = err.Error()
+ }
+ args = append(args, "error", loggableErr)
+ return prefix, f.render(args, kvList)
+}
+
+// AddName appends the specified name. funcr uses '/' characters to separate
+// name elements. Callers should not pass '/' in the provided name string, but
+// this library does not actually enforce that.
+func (f *Formatter) AddName(name string) {
+ if len(f.prefix) > 0 {
+ f.prefix += "/"
+ }
+ f.prefix += name
+}
+
+// AddValues adds key-value pairs to the set of saved values to be logged with
+// each log line.
+func (f *Formatter) AddValues(kvList []any) {
+ // Three slice args forces a copy.
+ n := len(f.values)
+ f.values = append(f.values[:n:n], kvList...)
+
+ vals := f.values
+ if hook := f.opts.RenderValuesHook; hook != nil {
+ vals = hook(f.sanitize(vals))
+ }
+
+ // Pre-render values, so we don't have to do it on each Info/Error call.
+ buf := bytes.NewBuffer(make([]byte, 0, 1024))
+ f.flatten(buf, vals, true) // escape user-provided keys
+ f.valuesStr = buf.String()
+}
+
+// AddCallDepth increases the number of stack-frames to skip when attributing
+// the log line to a file and line.
+func (f *Formatter) AddCallDepth(depth int) {
+ f.depth += depth
+}
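Every line funcr produces flows through the `write` function handed to `New` or `NewJSON`, after `Formatter.Enabled` filters against `Options.Verbosity`. A minimal sketch of both output modes; the rendered shapes in the comments are approximate:

```go
package main

import (
	"fmt"

	"github.com/go-logr/logr/funcr"
)

func main() {
	// key=value mode: Verbosity 1 means V(0) and V(1) logs pass Enabled.
	log := funcr.New(func(prefix, args string) {
		fmt.Println(prefix, args)
	}, funcr.Options{Verbosity: 1})

	log = log.WithName("app").WithValues("pid", 42)
	log.Info("started", "port", 8080)
	// app "level"=0 "msg"="started" "pid"=42 "port"=8080
	log.V(2).Info("debug detail") // 2 > Verbosity, discarded

	// Strict JSON mode: the prefix is folded into the record as "logger".
	jlog := funcr.NewJSON(func(obj string) { fmt.Println(obj) }, funcr.Options{})
	jlog.Info("hello", "count", 3)
	// {"logger":"","level":0,"msg":"hello","count":3}
}
```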
diff --git a/vendor/github.com/go-logr/logr/funcr/slogsink.go b/vendor/github.com/go-logr/logr/funcr/slogsink.go
new file mode 100644
index 000000000..7bd84761e
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/funcr/slogsink.go
@@ -0,0 +1,105 @@
+//go:build go1.21
+// +build go1.21
+
+/*
+Copyright 2023 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package funcr
+
+import (
+ "context"
+ "log/slog"
+
+ "github.com/go-logr/logr"
+)
+
+var _ logr.SlogSink = &fnlogger{}
+
+const extraSlogSinkDepth = 3 // 2 for slog, 1 for SlogSink
+
+func (l fnlogger) Handle(_ context.Context, record slog.Record) error {
+ kvList := make([]any, 0, 2*record.NumAttrs())
+ record.Attrs(func(attr slog.Attr) bool {
+ kvList = attrToKVs(attr, kvList)
+ return true
+ })
+
+ if record.Level >= slog.LevelError {
+ l.WithCallDepth(extraSlogSinkDepth).Error(nil, record.Message, kvList...)
+ } else {
+ level := l.levelFromSlog(record.Level)
+ l.WithCallDepth(extraSlogSinkDepth).Info(level, record.Message, kvList...)
+ }
+ return nil
+}
+
+func (l fnlogger) WithAttrs(attrs []slog.Attr) logr.SlogSink {
+ kvList := make([]any, 0, 2*len(attrs))
+ for _, attr := range attrs {
+ kvList = attrToKVs(attr, kvList)
+ }
+ l.AddValues(kvList)
+ return &l
+}
+
+func (l fnlogger) WithGroup(name string) logr.SlogSink {
+ l.startGroup(name)
+ return &l
+}
+
+// attrToKVs appends a slog.Attr to a logr-style kvList. It handles slog Groups
+// and other slog-specific details.
+func attrToKVs(attr slog.Attr, kvList []any) []any {
+ attrVal := attr.Value.Resolve()
+ if attrVal.Kind() == slog.KindGroup {
+ groupVal := attrVal.Group()
+ grpKVs := make([]any, 0, 2*len(groupVal))
+ for _, attr := range groupVal {
+ grpKVs = attrToKVs(attr, grpKVs)
+ }
+ if attr.Key == "" {
+ // slog says we have to inline these
+ kvList = append(kvList, grpKVs...)
+ } else {
+ kvList = append(kvList, attr.Key, PseudoStruct(grpKVs))
+ }
+ } else if attr.Key != "" {
+ kvList = append(kvList, attr.Key, attrVal.Any())
+ }
+
+ return kvList
+}
+
+// levelFromSlog adjusts the level by the logger's verbosity and negates it.
+// It ensures that the result is >= 0. This is necessary because the result is
+// passed to a LogSink and that API did not historically document whether
+// levels could be negative or what that meant.
+//
+// Some example usage:
+//
+// logrV0 := getMyLogger()
+// logrV2 := logrV0.V(2)
+// slogV2 := slog.New(logr.ToSlogHandler(logrV2))
+// slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6)
+// slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2)
+// slogV2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0)
+func (l fnlogger) levelFromSlog(level slog.Level) int {
+ result := -level
+ if result < 0 {
+ result = 0 // because LogSink doesn't expect negative V levels
+ }
+ return int(result)
+}
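Because `fnlogger` now satisfies `logr.SlogSink`, a funcr logger can back `log/slog` directly. The sketch below assumes logr's `ToSlogHandler` bridge, which ships alongside this file in logr; note how a named slog group reaches `attrToKVs` and renders as a nested object:

```go
//go:build go1.21

package main

import (
	"fmt"
	"log/slog"

	"github.com/go-logr/logr"
	"github.com/go-logr/logr/funcr"
)

func main() {
	lgr := funcr.NewJSON(func(obj string) { fmt.Println(obj) }, funcr.Options{Verbosity: 4})

	sl := slog.New(logr.ToSlogHandler(lgr))
	sl.Info("request",
		slog.Group("req", slog.String("method", "GET"), slog.Int("status", 200)),
	)
	// attrToKVs converts the "req" group into a PseudoStruct, so it renders
	// as a nested JSON object inside the record.

	sl.Debug("verbose detail") // slog Debug (-4) maps to V(4) via levelFromSlog
}
```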
diff --git a/vendor/github.com/go-task/slim-sprig/v3/.editorconfig b/vendor/github.com/go-task/slim-sprig/v3/.editorconfig
new file mode 100644
index 000000000..b0c95367e
--- /dev/null
+++ b/vendor/github.com/go-task/slim-sprig/v3/.editorconfig
@@ -0,0 +1,14 @@
+# editorconfig.org
+
+root = true
+
+[*]
+insert_final_newline = true
+charset = utf-8
+trim_trailing_whitespace = true
+indent_style = tab
+indent_size = 8
+
+[*.{md,yml,yaml,json}]
+indent_style = space
+indent_size = 2
diff --git a/vendor/github.com/go-task/slim-sprig/v3/.gitattributes b/vendor/github.com/go-task/slim-sprig/v3/.gitattributes
new file mode 100644
index 000000000..176a458f9
--- /dev/null
+++ b/vendor/github.com/go-task/slim-sprig/v3/.gitattributes
@@ -0,0 +1 @@
+* text=auto
diff --git a/vendor/github.com/go-task/slim-sprig/v3/.gitignore b/vendor/github.com/go-task/slim-sprig/v3/.gitignore
new file mode 100644
index 000000000..5e3002f88
--- /dev/null
+++ b/vendor/github.com/go-task/slim-sprig/v3/.gitignore
@@ -0,0 +1,2 @@
+vendor/
+/.glide
diff --git a/vendor/github.com/go-task/slim-sprig/v3/CHANGELOG.md b/vendor/github.com/go-task/slim-sprig/v3/CHANGELOG.md
new file mode 100644
index 000000000..2ce45dd4e
--- /dev/null
+++ b/vendor/github.com/go-task/slim-sprig/v3/CHANGELOG.md
@@ -0,0 +1,383 @@
+# Changelog
+
+## Release 3.2.3 (2022-11-29)
+
+### Changed
+
+- Updated docs (thanks @book987 @aJetHorn @neelayu @pellizzetti @apricote @SaigyoujiYuyuko233 @AlekSi)
+- #348: Updated huandu/xstrings which fixed a snake case bug (thanks @yxxhero)
+- #353: Updated masterminds/semver which included bug fixes
+- #354: Updated golang.org/x/crypto which included bug fixes
+
+## Release 3.2.2 (2021-02-04)
+
+This is a re-release of 3.2.1 to satisfy something with the Go module system.
+
+## Release 3.2.1 (2021-02-04)
+
+### Changed
+
+- Upgraded `Masterminds/goutils` to `v1.1.1`. see the [Security Advisory](https://github.com/Masterminds/goutils/security/advisories/GHSA-xg2h-wx96-xgxr)
+
+## Release 3.2.0 (2020-12-14)
+
+### Added
+
+- #211: Added randInt function (thanks @kochurovro)
+- #223: Added fromJson and mustFromJson functions (thanks @mholt)
+- #242: Added a bcrypt function (thanks @robbiet480)
+- #253: Added randBytes function (thanks @MikaelSmith)
+- #254: Added dig function for dicts (thanks @nyarly)
+- #257: Added regexQuoteMeta for quoting regex metadata (thanks @rheaton)
+- #261: Added filepath functions osBase, osDir, osExt, osClean, osIsAbs (thanks @zugl)
+- #268: Added and and all functions for testing conditions (thanks @phuslu)
+- #181: Added float64 arithmetic addf, add1f, subf, divf, mulf, maxf, and minf
+ (thanks @andrewmostello)
+- #265: Added chunk function to split array into smaller arrays (thanks @karelbilek)
+- #270: Extend certificate functions to handle non-RSA keys + add support for
+ ed25519 keys (thanks @misberner)
+
+### Changed
+
+- Removed testing and support for Go 1.12. ed25519 support requires Go 1.13 or newer
+- Using semver 3.1.1 and mergo 0.3.11
+
+### Fixed
+
+- #249: Fix htmlDateInZone example (thanks @spawnia)
+
+NOTE: The dependency github.com/imdario/mergo reverted the breaking change in
+0.3.9 via 0.3.10 release.
+
+## Release 3.1.0 (2020-04-16)
+
+NOTE: The dependency github.com/imdario/mergo made a behavior change in 0.3.9
+that impacts sprig functionality. Do not use sprig with a version newer than 0.3.8.
+
+### Added
+
+- #225: Added support for generating htpasswd hash (thanks @rustycl0ck)
+- #224: Added duration filter (thanks @frebib)
+- #205: Added `seq` function (thanks @thadc23)
+
+### Changed
+
+- #203: Unlambda functions with correct signature (thanks @muesli)
+- #236: Updated the license formatting for GitHub display purposes
+- #238: Updated package dependency versions. Note, mergo not updated to 0.3.9
+ as it causes a breaking change for sprig. That issue is tracked at
+ https://github.com/imdario/mergo/issues/139
+
+### Fixed
+
+- #229: Fix `seq` example in docs (thanks @kalmant)
+
+## Release 3.0.2 (2019-12-13)
+
+### Fixed
+
+- #220: Updating to semver v3.0.3 to fix issue with <= ranges
+- #218: fix typo elyptical->elliptic in ecdsa key description (thanks @laverya)
+
+## Release 3.0.1 (2019-12-08)
+
+### Fixed
+
+- #212: Updated semver fixing broken constraint checking with ^0.0
+
+## Release 3.0.0 (2019-10-02)
+
+### Added
+
+- #187: Added durationRound function (thanks @yjp20)
+- #189: Added numerous template functions that return errors rather than panic (thanks @nrvnrvn)
+- #193: Added toRawJson support (thanks @Dean-Coakley)
+- #197: Added get support to dicts (thanks @Dean-Coakley)
+
+### Changed
+
+- #186: Moving dependency management to Go modules
+- #186: Updated semver to v3. This has changes in the way ^ is handled
+- #194: Updated documentation on merging and how it copies. Added example using deepCopy
+- #196: trunc now supports negative values (thanks @Dean-Coakley)
+
+## Release 2.22.0 (2019-10-02)
+
+### Added
+
+- #173: Added getHostByName function to resolve dns names to ips (thanks @fcgravalos)
+- #195: Added deepCopy function for use with dicts
+
+### Changed
+
+- Updated merge and mergeOverwrite documentation to explain copying and how to
+ use deepCopy with it
+
+## Release 2.21.0 (2019-09-18)
+
+### Added
+
+- #122: Added encryptAES/decryptAES functions (thanks @n0madic)
+- #128: Added toDecimal support (thanks @Dean-Coakley)
+- #169: Added list concat (thanks @astorath)
+- #174: Added deepEqual function (thanks @bonifaido)
+- #170: Added url parse and join functions (thanks @astorath)
+
+### Changed
+
+- #171: Updated glide config for Google UUID to v1 and to add ranges to semver and testify
+
+### Fixed
+
+- #172: Fix semver wildcard example (thanks @piepmatz)
+- #175: Fix dateInZone doc example (thanks @s3than)
+
+## Release 2.20.0 (2019-06-18)
+
+### Added
+
+- #164: Adding function to get unix epoch for a time (@mattfarina)
+- #166: Adding tests for date_in_zone (@mattfarina)
+
+### Changed
+
+- #144: Fix function comments based on best practices from Effective Go (@CodeLingoTeam)
+- #150: Handles pointer type for time.Time in "htmlDate" (@mapreal19)
+- #161, #157, #160, #153, #158, #156, #155, #159, #152 documentation updates (@badeadan)
+
+### Fixed
+
+## Release 2.19.0 (2019-03-02)
+
+IMPORTANT: This release reverts a change from 2.18.0
+
+In the previous release (2.18), we prematurely merged a partial change to the crypto functions that led to creating two sets of crypto functions (I blame @technosophos -- since that's me). This release rolls back that change, and does what was originally intended: It alters the existing crypto functions to use secure random.
+
+We debated whether this classifies as a change worthy of major revision, but given the proximity to the last release, we have decided that treating 2.18 as a faulty release is the correct course of action. We apologize for any inconvenience.
+
+### Changed
+
+- Fix substr panic 35fb796 (Alexey igrychev)
+- Remove extra period 1eb7729 (Matthew Lorimor)
+- Make random string functions use crypto by default 6ceff26 (Matthew Lorimor)
+- README edits/fixes/suggestions 08fe136 (Lauri Apple)
+
+
+## Release 2.18.0 (2019-02-12)
+
+### Added
+
+- Added mergeOverwrite function
+- cryptographic functions that use secure random (see fe1de12)
+
+### Changed
+
+- Improve documentation of regexMatch function, resolves #139 90b89ce (Jan Tagscherer)
+- Handle has for nil list 9c10885 (Daniel Cohen)
+- Document behaviour of mergeOverwrite fe0dbe9 (Lukas Rieder)
+- doc: adds missing documentation. 4b871e6 (Fernandez Ludovic)
+- Replace outdated goutils imports 01893d2 (Matthew Lorimor)
+- Surface crypto secure random strings from goutils fe1de12 (Matthew Lorimor)
+- Handle untyped nil values as parameters to string functions 2b2ec8f (Morten Torkildsen)
+
+### Fixed
+
+- Fix dict merge issue and provide mergeOverwrite .dst .src1 to overwrite from src -> dst 4c59c12 (Lukas Rieder)
+- Fix substr var names and comments d581f80 (Dean Coakley)
+- Fix substr documentation 2737203 (Dean Coakley)
+
+## Release 2.17.1 (2019-01-03)
+
+### Fixed
+
+The 2.17.0 release did not have a version pinned for xstrings, which caused compilation failures when xstrings < 1.2 was used. This adds the correct version string to glide.yaml.
+
+## Release 2.17.0 (2019-01-03)
+
+### Added
+
+- adds adler32sum function and test 6908fc2 (marshallford)
+- Added kebabcase function ca331a1 (Ilyes512)
+
+### Changed
+
+- Update goutils to 1.1.0 4e1125d (Matt Butcher)
+
+### Fixed
+
+- Fix 'has' documentation e3f2a85 (dean-coakley)
+- docs(dict): fix typo in pick example dc424f9 (Dustin Specker)
+- fixes spelling errors... not sure how that happened 4cf188a (marshallford)
+
+## Release 2.16.0 (2018-08-13)
+
+### Added
+
+- add splitn function fccb0b0 (Helgi Þorbjörnsson)
+- Add slice func df28ca7 (gongdo)
+- Generate serial number a3bdffd (Cody Coons)
+- Extract values of dict with values function df39312 (Lawrence Jones)
+
+### Changed
+
+- Modify panic message for list.slice ae38335 (gongdo)
+- Minor improvement in code quality - Removed an unreachable piece of code at defaults.go#L26:6 - Resolve formatting issues. 5834241 (Abhishek Kashyap)
+- Remove duplicated documentation 1d97af1 (Matthew Fisher)
+- Test on go 1.11 49df809 (Helgi Þormar Þorbjörnsson)
+
+### Fixed
+
+- Fix file permissions c5f40b5 (gongdo)
+- Fix example for buildCustomCert 7779e0d (Tin Lam)
+
+## Release 2.15.0 (2018-04-02)
+
+### Added
+
+- #68 and #69: Add json helpers to docs (thanks @arunvelsriram)
+- #66: Add ternary function (thanks @binoculars)
+- #67: Allow keys function to take multiple dicts (thanks @binoculars)
+- #89: Added sha1sum to crypto function (thanks @benkeil)
+- #81: Allow customizing Root CA that used by genSignedCert (thanks @chenzhiwei)
+- #92: Add travis testing for go 1.10
+- #93: Adding appveyor config for windows testing
+
+### Changed
+
+- #90: Updating to more recent dependencies
+- #73: replace satori/go.uuid with google/uuid (thanks @petterw)
+
+### Fixed
+
+- #76: Fixed documentation typos (thanks @Thiht)
+- Fixed rounding issue on the `ago` function. Note, this removes support for Go 1.8 and older
+
+## Release 2.14.1 (2017-12-01)
+
+### Fixed
+
+- #60: Fix typo in function name documentation (thanks @neil-ca-moore)
+- #61: Removing line with {{ due to blocking github pages generation
+- #64: Update the list functions to handle int, string, and other slices for compatibility
+
+## Release 2.14.0 (2017-10-06)
+
+This new version of Sprig adds a set of functions for generating and working with SSL certificates.
+
+- `genCA` generates an SSL Certificate Authority
+- `genSelfSignedCert` generates an SSL self-signed certificate
+- `genSignedCert` generates an SSL certificate and key based on a given CA
+
+## Release 2.13.0 (2017-09-18)
+
+This release adds new functions, including:
+
+- `regexMatch`, `regexFindAll`, `regexFind`, `regexReplaceAll`, `regexReplaceAllLiteral`, and `regexSplit` to work with regular expressions
+- `floor`, `ceil`, and `round` math functions
+- `toDate` converts a string to a date
+- `nindent` is just like `indent` but also prepends a new line
+- `ago` returns the time from `time.Now`
+
+### Added
+
+- #40: Added basic regex functionality (thanks @alanquillin)
+- #41: Added ceil floor and round functions (thanks @alanquillin)
+- #48: Added toDate function (thanks @andreynering)
+- #50: Added nindent function (thanks @binoculars)
+- #46: Added ago function (thanks @slayer)
+
+### Changed
+
+- #51: Updated godocs to include new string functions (thanks @curtisallen)
+- #49: Added ability to merge multiple dicts (thanks @binoculars)
+
+## Release 2.12.0 (2017-05-17)
+
+- `snakecase`, `camelcase`, and `shuffle` are three new string functions
+- `fail` allows you to bail out of a template render when conditions are not met
+
+## Release 2.11.0 (2017-05-02)
+
+- Added `toJson` and `toPrettyJson`
+- Added `merge`
+- Refactored documentation
+
+## Release 2.10.0 (2017-03-15)
+
+- Added `semver` and `semverCompare` for Semantic Versions
+- `list` replaces `tuple`
+- Fixed issue with `join`
+- Added `first`, `last`, `initial`, `rest`, `prepend`, `append`, `toString`, `toStrings`, `sortAlpha`, `reverse`, `coalesce`, `pluck`, `pick`, `compact`, `keys`, `omit`, `uniq`, `has`, `without`
+
+## Release 2.9.0 (2017-02-23)
+
+- Added `splitList` to split a list
+- Added crypto functions of `genPrivateKey` and `derivePassword`
+
+## Release 2.8.0 (2016-12-21)
+
+- Added access to several path functions (`base`, `dir`, `clean`, `ext`, and `abs`)
+- Added functions for _mutating_ dictionaries (`set`, `unset`, `hasKey`)
+
+## Release 2.7.0 (2016-12-01)
+
+- Added `sha256sum` to generate a hash of an input
+- Added functions to convert a numeric or string to `int`, `int64`, `float64`
+
+## Release 2.6.0 (2016-10-03)
+
+- Added a `uuidv4` template function for generating UUIDs inside of a template.
+
+## Release 2.5.0 (2016-08-19)
+
+- New `trimSuffix`, `trimPrefix`, `hasSuffix`, and `hasPrefix` functions
+- New aliases have been added for a few functions that didn't follow the naming conventions (`trimAll` and `abbrevBoth`)
+- `trimall` and `abbrevboth` (notice the case) are deprecated and will be removed in 3.0.0
+
+## Release 2.4.0 (2016-08-16)
+
+- Adds two functions: `until` and `untilStep`
+
+## Release 2.3.0 (2016-06-21)
+
+- cat: Concatenate strings with whitespace separators.
+- replace: Replace parts of a string: `replace " " "-" "Me First"` renders "Me-First"
+- plural: Format plurals: `len "foo" | plural "one foo" "many foos"` renders "many foos"
+- indent: Indent blocks of text in a way that is sensitive to "\n" characters.
+
+## Release 2.2.0 (2016-04-21)
+
+- Added a `genPrivateKey` function (Thanks @bacongobbler)
+
+## Release 2.1.0 (2016-03-30)
+
+- `default` now prints the default value when it does not receive a value down the pipeline. It is much safer now to do `{{.Foo | default "bar"}}`.
+- Added accessors for "hermetic" functions. These return only functions that, when given the same input, produce the same output.
+
+## Release 2.0.0 (2016-03-29)
+
+Because we switched from `int` to `int64` as the return value for all integer math functions, the library's major version number has been incremented.
+
+- `min` complements `max` (formerly `biggest`)
+- `empty` indicates that a value is the empty value for its type
+- `tuple` creates a tuple inside of a template: `{{$t := tuple "a", "b" "c"}}`
+- `dict` creates a dictionary inside of a template `{{$d := dict "key1" "val1" "key2" "val2"}}`
+- Date formatters have been added for HTML dates (as used in `date` input fields)
+- Integer math functions can convert from a number of types, including `string` (via `strconv.ParseInt`).
+
+## Release 1.2.0 (2016-02-01)
+
+- Added quote and squote
+- Added b32enc and b32dec
+- add now takes varargs
+- biggest now takes varargs
+
+## Release 1.1.0 (2015-12-29)
+
+- Added #4: Added contains function. strings.Contains, but with the arguments
+ switched to simplify common pipelines. (thanks krancour)
+- Added Travis-CI testing support
+
+## Release 1.0.0 (2015-12-23)
+
+- Initial release
diff --git a/vendor/github.com/go-task/slim-sprig/v3/LICENSE.txt b/vendor/github.com/go-task/slim-sprig/v3/LICENSE.txt
new file mode 100644
index 000000000..f311b1eaa
--- /dev/null
+++ b/vendor/github.com/go-task/slim-sprig/v3/LICENSE.txt
@@ -0,0 +1,19 @@
+Copyright (C) 2013-2020 Masterminds
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/go-task/slim-sprig/v3/README.md b/vendor/github.com/go-task/slim-sprig/v3/README.md
new file mode 100644
index 000000000..b5ab56425
--- /dev/null
+++ b/vendor/github.com/go-task/slim-sprig/v3/README.md
@@ -0,0 +1,73 @@
+# Slim-Sprig: Template functions for Go templates [![Go Reference](https://pkg.go.dev/badge/github.com/go-task/slim-sprig/v3.svg)](https://pkg.go.dev/github.com/go-task/slim-sprig/v3)
+
+Slim-Sprig is a fork of [Sprig](https://github.com/Masterminds/sprig), but with
+all functions that depend on external (non-standard-library) or crypto packages
+removed.
+The reason for this is to make the library more lightweight. Most of these
+functions (especially the crypto ones) are not needed in most apps, but they
+cost a lot in terms of binary size and compilation time.
+
+## Usage
+
+**Template developers**: Please use Slim-Sprig's [function documentation](https://go-task.github.io/slim-sprig/) for
+detailed instructions and code snippets for the >100 template functions available.
+
+**Go developers**: If you'd like to include Slim-Sprig as a library in your program,
+our API documentation is available [at GoDoc.org](http://godoc.org/github.com/go-task/slim-sprig).
+
+For standard usage, read on.
+
+### Load the Slim-Sprig library
+
+To load the Slim-Sprig `FuncMap`:
+
+```go
+
+import (
+ "html/template"
+
+ "github.com/go-task/slim-sprig"
+)
+
+// This example illustrates that the FuncMap *must* be set before the
+// templates themselves are loaded.
+tpl := template.Must(
+ template.New("base").Funcs(sprig.FuncMap()).ParseGlob("*.html"),
+)
+```
+
+### Calling the functions inside of templates
+
+By convention, all functions are lowercase. This seems to follow the Go
+idiom for template functions (as opposed to template methods, which are
+TitleCase). For example, this:
+
+```
+{{ "hello!" | upper | repeat 5 }}
+```
+
+produces this:
+
+```
+HELLO!HELLO!HELLO!HELLO!HELLO!
+```
+
+## Principles Driving Our Function Selection
+
+We followed these principles to decide which functions to add and how to implement them:
+
+- Use template functions to build layout. The following
+ types of operations are within the domain of template functions:
+ - Formatting
+ - Layout
+ - Simple type conversions
+ - Utilities that assist in handling common formatting and layout needs (e.g. arithmetic)
+- Template functions should not return errors unless there is no way to print
+ a sensible value. For example, converting a string to an integer should not
+ produce an error if conversion fails. Instead, it should display a default
+ value.
+- Simple math is necessary for grid layouts, pagers, and so on. Complex math
+ (anything other than arithmetic) should be done outside of templates.
+- Template functions only deal with the data passed into them. They never retrieve
+ data from a source.
+- Finally, do not override core Go template functions.
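The `upper`/`repeat` pipeline shown earlier in this README can be exercised from Go with `text/template` as well; a minimal sketch:

```go
package main

import (
	"os"
	"text/template"

	sprig "github.com/go-task/slim-sprig/v3"
)

func main() {
	tpl := template.Must(template.New("demo").
		Funcs(sprig.FuncMap()).
		Parse(`{{ "hello!" | upper | repeat 5 }}`))

	// Prints: HELLO!HELLO!HELLO!HELLO!HELLO!
	if err := tpl.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
}
```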
diff --git a/vendor/github.com/go-task/slim-sprig/v3/Taskfile.yml b/vendor/github.com/go-task/slim-sprig/v3/Taskfile.yml
new file mode 100644
index 000000000..8e6346bb1
--- /dev/null
+++ b/vendor/github.com/go-task/slim-sprig/v3/Taskfile.yml
@@ -0,0 +1,12 @@
+# https://taskfile.dev
+
+version: '3'
+
+tasks:
+ default:
+ cmds:
+ - task: test
+
+ test:
+ cmds:
+ - go test -v .
diff --git a/vendor/github.com/go-task/slim-sprig/v3/crypto.go b/vendor/github.com/go-task/slim-sprig/v3/crypto.go
new file mode 100644
index 000000000..d06e516d4
--- /dev/null
+++ b/vendor/github.com/go-task/slim-sprig/v3/crypto.go
@@ -0,0 +1,24 @@
+package sprig
+
+import (
+ "crypto/sha1"
+ "crypto/sha256"
+ "encoding/hex"
+ "fmt"
+ "hash/adler32"
+)
+
+func sha256sum(input string) string {
+ hash := sha256.Sum256([]byte(input))
+ return hex.EncodeToString(hash[:])
+}
+
+func sha1sum(input string) string {
+ hash := sha1.Sum([]byte(input))
+ return hex.EncodeToString(hash[:])
+}
+
+func adler32sum(input string) string {
+ hash := adler32.Checksum([]byte(input))
+ return fmt.Sprintf("%d", hash)
+}
diff --git a/vendor/github.com/go-task/slim-sprig/v3/date.go b/vendor/github.com/go-task/slim-sprig/v3/date.go
new file mode 100644
index 000000000..ed022ddac
--- /dev/null
+++ b/vendor/github.com/go-task/slim-sprig/v3/date.go
@@ -0,0 +1,152 @@
+package sprig
+
+import (
+ "strconv"
+ "time"
+)
+
+// Given a format and a date, format the date string.
+//
+// Date can be a `time.Time` or an `int, int32, int64`.
+// In the latter case, it is treated as seconds since UNIX
+// epoch.
+func date(fmt string, date interface{}) string {
+ return dateInZone(fmt, date, "Local")
+}
+
+func htmlDate(date interface{}) string {
+ return dateInZone("2006-01-02", date, "Local")
+}
+
+func htmlDateInZone(date interface{}, zone string) string {
+ return dateInZone("2006-01-02", date, zone)
+}
+
+func dateInZone(fmt string, date interface{}, zone string) string {
+ var t time.Time
+ switch date := date.(type) {
+ default:
+ t = time.Now()
+ case time.Time:
+ t = date
+ case *time.Time:
+ t = *date
+ case int64:
+ t = time.Unix(date, 0)
+ case int:
+ t = time.Unix(int64(date), 0)
+ case int32:
+ t = time.Unix(int64(date), 0)
+ }
+
+ loc, err := time.LoadLocation(zone)
+ if err != nil {
+ loc, _ = time.LoadLocation("UTC")
+ }
+
+ return t.In(loc).Format(fmt)
+}
+
+func dateModify(fmt string, date time.Time) time.Time {
+ d, err := time.ParseDuration(fmt)
+ if err != nil {
+ return date
+ }
+ return date.Add(d)
+}
+
+func mustDateModify(fmt string, date time.Time) (time.Time, error) {
+ d, err := time.ParseDuration(fmt)
+ if err != nil {
+ return time.Time{}, err
+ }
+ return date.Add(d), nil
+}
+
+func dateAgo(date interface{}) string {
+ var t time.Time
+
+ switch date := date.(type) {
+ default:
+ t = time.Now()
+ case time.Time:
+ t = date
+ case int64:
+ t = time.Unix(date, 0)
+ case int:
+ t = time.Unix(int64(date), 0)
+ }
+ // Drop resolution to seconds
+ duration := time.Since(t).Round(time.Second)
+ return duration.String()
+}
+
+func duration(sec interface{}) string {
+ var n int64
+ switch value := sec.(type) {
+ default:
+ n = 0
+ case string:
+ n, _ = strconv.ParseInt(value, 10, 64)
+ case int64:
+ n = value
+ }
+ return (time.Duration(n) * time.Second).String()
+}
+
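+// durationRound renders a duration as its single largest whole unit
+// (y, mo, d, h, m, or s), e.g. 2h10m becomes "2h"; it accepts a duration
+// string, nanoseconds as an int64, or a time.Time (measured from now).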
+func durationRound(duration interface{}) string {
+ var d time.Duration
+ switch duration := duration.(type) {
+ default:
+ d = 0
+ case string:
+ d, _ = time.ParseDuration(duration)
+ case int64:
+ d = time.Duration(duration)
+ case time.Time:
+ d = time.Since(duration)
+ }
+
+ u := uint64(d)
+ neg := d < 0
+ if neg {
+ u = -u
+ }
+
+ var (
+ year = uint64(time.Hour) * 24 * 365
+ month = uint64(time.Hour) * 24 * 30
+ day = uint64(time.Hour) * 24
+ hour = uint64(time.Hour)
+ minute = uint64(time.Minute)
+ second = uint64(time.Second)
+ )
+ switch {
+ case u > year:
+ return strconv.FormatUint(u/year, 10) + "y"
+ case u > month:
+ return strconv.FormatUint(u/month, 10) + "mo"
+ case u > day:
+ return strconv.FormatUint(u/day, 10) + "d"
+ case u > hour:
+ return strconv.FormatUint(u/hour, 10) + "h"
+ case u > minute:
+ return strconv.FormatUint(u/minute, 10) + "m"
+ case u > second:
+ return strconv.FormatUint(u/second, 10) + "s"
+ }
+ return "0s"
+}
+
+func toDate(fmt, str string) time.Time {
+ t, _ := time.ParseInLocation(fmt, str, time.Local)
+ return t
+}
+
+func mustToDate(fmt, str string) (time.Time, error) {
+ return time.ParseInLocation(fmt, str, time.Local)
+}
+
+func unixEpoch(date time.Time) string {
+ return strconv.FormatInt(date.Unix(), 10)
+}
diff --git a/vendor/github.com/go-task/slim-sprig/v3/defaults.go b/vendor/github.com/go-task/slim-sprig/v3/defaults.go
new file mode 100644
index 000000000..b9f979666
--- /dev/null
+++ b/vendor/github.com/go-task/slim-sprig/v3/defaults.go
@@ -0,0 +1,163 @@
+package sprig
+
+import (
+ "bytes"
+ "encoding/json"
+ "math/rand"
+ "reflect"
+ "strings"
+ "time"
+)
+
+func init() {
+ rand.Seed(time.Now().UnixNano())
+}
+
+// dfault checks whether `given` is set, and returns default if not set.
+//
+// This returns `d` if `given` appears not to be set, and `given` otherwise.
+//
+// For numeric types 0 is unset.
+// For strings, maps, arrays, and slices, len() = 0 is considered unset.
+// For bool, false is unset.
+// Structs are never considered unset.
+//
+// For everything else, including pointers, a nil value is unset.
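+//
+// For example, via its registered template name "default":
+//
+//	{{ "" | default "fallback" }}      renders "fallback"
+//	{{ "value" | default "fallback" }} renders "value"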
+func dfault(d interface{}, given ...interface{}) interface{} {
+
+ if empty(given) || empty(given[0]) {
+ return d
+ }
+ return given[0]
+}
+
+// empty returns true if the given value has the zero value for its type.
+func empty(given interface{}) bool {
+ g := reflect.ValueOf(given)
+ if !g.IsValid() {
+ return true
+ }
+
+ // Basically adapted from text/template.isTrue
+ switch g.Kind() {
+ default:
+ return g.IsNil()
+ case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
+ return g.Len() == 0
+ case reflect.Bool:
+ return !g.Bool()
+ case reflect.Complex64, reflect.Complex128:
+ return g.Complex() == 0
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return g.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return g.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return g.Float() == 0
+ case reflect.Struct:
+ return false
+ }
+}
+
+// coalesce returns the first non-empty value.
+func coalesce(v ...interface{}) interface{} {
+ for _, val := range v {
+ if !empty(val) {
+ return val
+ }
+ }
+ return nil
+}
+
+// all returns true if empty(x) is false for all values x in the list.
+// If the list is empty, return true.
+func all(v ...interface{}) bool {
+ for _, val := range v {
+ if empty(val) {
+ return false
+ }
+ }
+ return true
+}
+
+// any returns true if empty(x) is false for any x in the list.
+// If the list is empty, return false.
+func any(v ...interface{}) bool {
+ for _, val := range v {
+ if !empty(val) {
+ return true
+ }
+ }
+ return false
+}
+
+// fromJson decodes JSON into a structured value, ignoring errors.
+func fromJson(v string) interface{} {
+ output, _ := mustFromJson(v)
+ return output
+}
+
+// mustFromJson decodes JSON into a structured value, returning errors.
+func mustFromJson(v string) (interface{}, error) {
+ var output interface{}
+ err := json.Unmarshal([]byte(v), &output)
+ return output, err
+}
+
+// toJson encodes an item into a JSON string
+func toJson(v interface{}) string {
+ output, _ := json.Marshal(v)
+ return string(output)
+}
+
+func mustToJson(v interface{}) (string, error) {
+ output, err := json.Marshal(v)
+ if err != nil {
+ return "", err
+ }
+ return string(output), nil
+}
+
+// toPrettyJson encodes an item into a pretty (indented) JSON string
+func toPrettyJson(v interface{}) string {
+ output, _ := json.MarshalIndent(v, "", " ")
+ return string(output)
+}
+
+func mustToPrettyJson(v interface{}) (string, error) {
+ output, err := json.MarshalIndent(v, "", " ")
+ if err != nil {
+ return "", err
+ }
+ return string(output), nil
+}
+
+// toRawJson encodes an item into a JSON string with no escaping of HTML characters.
+func toRawJson(v interface{}) string {
+ output, err := mustToRawJson(v)
+ if err != nil {
+ panic(err)
+ }
+ return string(output)
+}
+
+// mustToRawJson encodes an item into a JSON string with no escaping of HTML characters.
+func mustToRawJson(v interface{}) (string, error) {
+ buf := new(bytes.Buffer)
+ enc := json.NewEncoder(buf)
+ enc.SetEscapeHTML(false)
+ err := enc.Encode(&v)
+ if err != nil {
+ return "", err
+ }
+ return strings.TrimSuffix(buf.String(), "\n"), nil
+}
+
+// ternary returns the first value if the last value is true, otherwise returns the second value.
+func ternary(vt interface{}, vf interface{}, v bool) interface{} {
+ if v {
+ return vt
+ }
+
+ return vf
+}
diff --git a/vendor/github.com/go-task/slim-sprig/v3/dict.go b/vendor/github.com/go-task/slim-sprig/v3/dict.go
new file mode 100644
index 000000000..77ebc61b1
--- /dev/null
+++ b/vendor/github.com/go-task/slim-sprig/v3/dict.go
@@ -0,0 +1,118 @@
+package sprig
+
+func get(d map[string]interface{}, key string) interface{} {
+ if val, ok := d[key]; ok {
+ return val
+ }
+ return ""
+}
+
+func set(d map[string]interface{}, key string, value interface{}) map[string]interface{} {
+ d[key] = value
+ return d
+}
+
+func unset(d map[string]interface{}, key string) map[string]interface{} {
+ delete(d, key)
+ return d
+}
+
+func hasKey(d map[string]interface{}, key string) bool {
+ _, ok := d[key]
+ return ok
+}
+
+func pluck(key string, d ...map[string]interface{}) []interface{} {
+ res := []interface{}{}
+ for _, dict := range d {
+ if val, ok := dict[key]; ok {
+ res = append(res, val)
+ }
+ }
+ return res
+}
+
+func keys(dicts ...map[string]interface{}) []string {
+ k := []string{}
+ for _, dict := range dicts {
+ for key := range dict {
+ k = append(k, key)
+ }
+ }
+ return k
+}
+
+func pick(dict map[string]interface{}, keys ...string) map[string]interface{} {
+ res := map[string]interface{}{}
+ for _, k := range keys {
+ if v, ok := dict[k]; ok {
+ res[k] = v
+ }
+ }
+ return res
+}
+
+func omit(dict map[string]interface{}, keys ...string) map[string]interface{} {
+ res := map[string]interface{}{}
+
+ omit := make(map[string]bool, len(keys))
+ for _, k := range keys {
+ omit[k] = true
+ }
+
+ for k, v := range dict {
+ if _, ok := omit[k]; !ok {
+ res[k] = v
+ }
+ }
+ return res
+}
+
+func dict(v ...interface{}) map[string]interface{} {
+ dict := map[string]interface{}{}
+ lenv := len(v)
+ for i := 0; i < lenv; i += 2 {
+ key := strval(v[i])
+ if i+1 >= lenv {
+ dict[key] = ""
+ continue
+ }
+ dict[key] = v[i+1]
+ }
+ return dict
+}
+
+func values(dict map[string]interface{}) []interface{} {
+ values := []interface{}{}
+ for _, value := range dict {
+ values = append(values, value)
+ }
+
+ return values
+}
+
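+// dig walks nested dicts: the leading arguments are keys, the second-to-last
+// is a default returned when any key along the path is missing, and the last
+// is the dict to search, e.g. {{ dig "user" "name" "n/a" $mydict }}.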
+func dig(ps ...interface{}) (interface{}, error) {
+ if len(ps) < 3 {
+ panic("dig needs at least three arguments")
+ }
+ dict := ps[len(ps)-1].(map[string]interface{})
+ def := ps[len(ps)-2]
+ ks := make([]string, len(ps)-2)
+ for i := 0; i < len(ks); i++ {
+ ks[i] = ps[i].(string)
+ }
+
+ return digFromDict(dict, def, ks)
+}
+
+func digFromDict(dict map[string]interface{}, d interface{}, ks []string) (interface{}, error) {
+ k, ns := ks[0], ks[1:len(ks)]
+ step, has := dict[k]
+ if !has {
+ return d, nil
+ }
+ if len(ns) == 0 {
+ return step, nil
+ }
+ return digFromDict(step.(map[string]interface{}), d, ns)
+}
diff --git a/vendor/github.com/go-task/slim-sprig/v3/doc.go b/vendor/github.com/go-task/slim-sprig/v3/doc.go
new file mode 100644
index 000000000..aabb9d448
--- /dev/null
+++ b/vendor/github.com/go-task/slim-sprig/v3/doc.go
@@ -0,0 +1,19 @@
+/*
+Package sprig provides template functions for Go.
+
+This package contains a number of utility functions for working with data
+inside of Go `html/template` and `text/template` files.
+
+To add these functions, use the `template.Funcs()` method:
+
+	t := template.New("foo").Funcs(sprig.FuncMap())
+
+Note that you should add the function map before you parse any template files.
+
+In several cases, Sprig reverses the order of arguments from the way they
+appear in the standard library. This is to make it easier to pipe
+arguments into functions.
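+
+For example, "repeat" takes the repeat count first so that a string can be
+piped in:
+
+	{{ "ha" | repeat 3 }}
+
+renders "hahaha".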
+
+See https://go-task.github.io/slim-sprig/ for more detailed documentation on each of the available functions.
+*/
+package sprig
diff --git a/vendor/github.com/go-task/slim-sprig/v3/functions.go b/vendor/github.com/go-task/slim-sprig/v3/functions.go
new file mode 100644
index 000000000..5ea74f899
--- /dev/null
+++ b/vendor/github.com/go-task/slim-sprig/v3/functions.go
@@ -0,0 +1,317 @@
+package sprig
+
+import (
+ "errors"
+ "html/template"
+ "math/rand"
+ "os"
+ "path"
+ "path/filepath"
+ "reflect"
+ "strconv"
+ "strings"
+ ttemplate "text/template"
+ "time"
+)
+
+// FuncMap produces the function map.
+//
+// Use this to pass the functions into the template engine:
+//
+//	tpl := template.New("foo").Funcs(sprig.FuncMap())
+//
+func FuncMap() template.FuncMap {
+ return HtmlFuncMap()
+}
+
+// HermeticTxtFuncMap returns a 'text/template'.FuncMap with only repeatable functions.
+func HermeticTxtFuncMap() ttemplate.FuncMap {
+ r := TxtFuncMap()
+ for _, name := range nonhermeticFunctions {
+ delete(r, name)
+ }
+ return r
+}
+
+// HermeticHtmlFuncMap returns an 'html/template'.FuncMap with only repeatable functions.
+func HermeticHtmlFuncMap() template.FuncMap {
+ r := HtmlFuncMap()
+ for _, name := range nonhermeticFunctions {
+ delete(r, name)
+ }
+ return r
+}
+
+// TxtFuncMap returns a 'text/template'.FuncMap
+func TxtFuncMap() ttemplate.FuncMap {
+ return ttemplate.FuncMap(GenericFuncMap())
+}
+
+// HtmlFuncMap returns an 'html/template'.FuncMap
+func HtmlFuncMap() template.FuncMap {
+ return template.FuncMap(GenericFuncMap())
+}
+
+// GenericFuncMap returns a copy of the basic function map as a map[string]interface{}.
+func GenericFuncMap() map[string]interface{} {
+ gfm := make(map[string]interface{}, len(genericMap))
+ for k, v := range genericMap {
+ gfm[k] = v
+ }
+ return gfm
+}
+
+// These functions are not guaranteed to evaluate to the same result for a
+// given input, because they refer to the environment or global state.
+var nonhermeticFunctions = []string{
+ // Date functions
+ "date",
+ "date_in_zone",
+ "date_modify",
+ "now",
+ "htmlDate",
+ "htmlDateInZone",
+ "dateInZone",
+ "dateModify",
+
+ // Strings
+ "randAlphaNum",
+ "randAlpha",
+ "randAscii",
+ "randNumeric",
+ "randBytes",
+ "uuidv4",
+
+ // OS
+ "env",
+ "expandenv",
+
+ // Network
+ "getHostByName",
+}
+
+var genericMap = map[string]interface{}{
+ "hello": func() string { return "Hello!" },
+
+ // Date functions
+ "ago": dateAgo,
+ "date": date,
+ "date_in_zone": dateInZone,
+ "date_modify": dateModify,
+ "dateInZone": dateInZone,
+ "dateModify": dateModify,
+ "duration": duration,
+ "durationRound": durationRound,
+ "htmlDate": htmlDate,
+ "htmlDateInZone": htmlDateInZone,
+ "must_date_modify": mustDateModify,
+ "mustDateModify": mustDateModify,
+ "mustToDate": mustToDate,
+ "now": time.Now,
+ "toDate": toDate,
+ "unixEpoch": unixEpoch,
+
+ // Strings
+ "trunc": trunc,
+ "trim": strings.TrimSpace,
+ "upper": strings.ToUpper,
+ "lower": strings.ToLower,
+ "title": strings.Title,
+ "substr": substring,
+ // Switch order so that "foo" | repeat 5
+ "repeat": func(count int, str string) string { return strings.Repeat(str, count) },
+ // Deprecated: Use trimAll.
+ "trimall": func(a, b string) string { return strings.Trim(b, a) },
+ // Switch order so that "$foo" | trimall "$"
+ "trimAll": func(a, b string) string { return strings.Trim(b, a) },
+ "trimSuffix": func(a, b string) string { return strings.TrimSuffix(b, a) },
+ "trimPrefix": func(a, b string) string { return strings.TrimPrefix(b, a) },
+ // Switch order so that "foobar" | contains "foo"
+ "contains": func(substr string, str string) bool { return strings.Contains(str, substr) },
+ "hasPrefix": func(substr string, str string) bool { return strings.HasPrefix(str, substr) },
+ "hasSuffix": func(substr string, str string) bool { return strings.HasSuffix(str, substr) },
+ "quote": quote,
+ "squote": squote,
+ "cat": cat,
+ "indent": indent,
+ "nindent": nindent,
+ "replace": replace,
+ "plural": plural,
+ "sha1sum": sha1sum,
+ "sha256sum": sha256sum,
+ "adler32sum": adler32sum,
+ "toString": strval,
+
+ // Wrap Atoi to stop errors.
+ "atoi": func(a string) int { i, _ := strconv.Atoi(a); return i },
+ "int64": toInt64,
+ "int": toInt,
+ "float64": toFloat64,
+ "seq": seq,
+ "toDecimal": toDecimal,
+
+ //"gt": func(a, b int) bool {return a > b},
+ //"gte": func(a, b int) bool {return a >= b},
+ //"lt": func(a, b int) bool {return a < b},
+ //"lte": func(a, b int) bool {return a <= b},
+
+	// split "/" foo/bar returns map[string]string{"_0": "foo", "_1": "bar"}
+ "split": split,
+ "splitList": func(sep, orig string) []string { return strings.Split(orig, sep) },
+	// splitn "/" 2 foo/bar/fuu returns map[string]string{"_0": "foo", "_1": "bar/fuu"}
+ "splitn": splitn,
+ "toStrings": strslice,
+
+ "until": until,
+ "untilStep": untilStep,
+
+ // VERY basic arithmetic.
+ "add1": func(i interface{}) int64 { return toInt64(i) + 1 },
+ "add": func(i ...interface{}) int64 {
+ var a int64 = 0
+ for _, b := range i {
+ a += toInt64(b)
+ }
+ return a
+ },
+ "sub": func(a, b interface{}) int64 { return toInt64(a) - toInt64(b) },
+ "div": func(a, b interface{}) int64 { return toInt64(a) / toInt64(b) },
+ "mod": func(a, b interface{}) int64 { return toInt64(a) % toInt64(b) },
+ "mul": func(a interface{}, v ...interface{}) int64 {
+ val := toInt64(a)
+ for _, b := range v {
+ val = val * toInt64(b)
+ }
+ return val
+ },
+ "randInt": func(min, max int) int { return rand.Intn(max-min) + min },
+ "biggest": max,
+ "max": max,
+ "min": min,
+ "maxf": maxf,
+ "minf": minf,
+ "ceil": ceil,
+ "floor": floor,
+ "round": round,
+
+	// string slices. Note that we reverse the order because that's better
+ // for template processing.
+ "join": join,
+ "sortAlpha": sortAlpha,
+
+ // Defaults
+ "default": dfault,
+ "empty": empty,
+ "coalesce": coalesce,
+ "all": all,
+ "any": any,
+ "compact": compact,
+ "mustCompact": mustCompact,
+ "fromJson": fromJson,
+ "toJson": toJson,
+ "toPrettyJson": toPrettyJson,
+ "toRawJson": toRawJson,
+ "mustFromJson": mustFromJson,
+ "mustToJson": mustToJson,
+ "mustToPrettyJson": mustToPrettyJson,
+ "mustToRawJson": mustToRawJson,
+ "ternary": ternary,
+
+ // Reflection
+ "typeOf": typeOf,
+ "typeIs": typeIs,
+ "typeIsLike": typeIsLike,
+ "kindOf": kindOf,
+ "kindIs": kindIs,
+ "deepEqual": reflect.DeepEqual,
+
+ // OS:
+ "env": os.Getenv,
+ "expandenv": os.ExpandEnv,
+
+ // Network:
+ "getHostByName": getHostByName,
+
+ // Paths:
+ "base": path.Base,
+ "dir": path.Dir,
+ "clean": path.Clean,
+ "ext": path.Ext,
+ "isAbs": path.IsAbs,
+
+ // Filepaths:
+ "osBase": filepath.Base,
+ "osClean": filepath.Clean,
+ "osDir": filepath.Dir,
+ "osExt": filepath.Ext,
+ "osIsAbs": filepath.IsAbs,
+
+ // Encoding:
+ "b64enc": base64encode,
+ "b64dec": base64decode,
+ "b32enc": base32encode,
+ "b32dec": base32decode,
+
+ // Data Structures:
+ "tuple": list, // FIXME: with the addition of append/prepend these are no longer immutable.
+ "list": list,
+ "dict": dict,
+ "get": get,
+ "set": set,
+ "unset": unset,
+ "hasKey": hasKey,
+ "pluck": pluck,
+ "keys": keys,
+ "pick": pick,
+ "omit": omit,
+ "values": values,
+
+ "append": push, "push": push,
+ "mustAppend": mustPush, "mustPush": mustPush,
+ "prepend": prepend,
+ "mustPrepend": mustPrepend,
+ "first": first,
+ "mustFirst": mustFirst,
+ "rest": rest,
+ "mustRest": mustRest,
+ "last": last,
+ "mustLast": mustLast,
+ "initial": initial,
+ "mustInitial": mustInitial,
+ "reverse": reverse,
+ "mustReverse": mustReverse,
+ "uniq": uniq,
+ "mustUniq": mustUniq,
+ "without": without,
+ "mustWithout": mustWithout,
+ "has": has,
+ "mustHas": mustHas,
+ "slice": slice,
+ "mustSlice": mustSlice,
+ "concat": concat,
+ "dig": dig,
+ "chunk": chunk,
+ "mustChunk": mustChunk,
+
+ // Flow Control:
+ "fail": func(msg string) (string, error) { return "", errors.New(msg) },
+
+ // Regex
+ "regexMatch": regexMatch,
+ "mustRegexMatch": mustRegexMatch,
+ "regexFindAll": regexFindAll,
+ "mustRegexFindAll": mustRegexFindAll,
+ "regexFind": regexFind,
+ "mustRegexFind": mustRegexFind,
+ "regexReplaceAll": regexReplaceAll,
+ "mustRegexReplaceAll": mustRegexReplaceAll,
+ "regexReplaceAllLiteral": regexReplaceAllLiteral,
+ "mustRegexReplaceAllLiteral": mustRegexReplaceAllLiteral,
+ "regexSplit": regexSplit,
+ "mustRegexSplit": mustRegexSplit,
+ "regexQuoteMeta": regexQuoteMeta,
+
+ // URLs:
+ "urlParse": urlParse,
+ "urlJoin": urlJoin,
+}
diff --git a/vendor/github.com/go-task/slim-sprig/v3/list.go b/vendor/github.com/go-task/slim-sprig/v3/list.go
new file mode 100644
index 000000000..ca0fbb789
--- /dev/null
+++ b/vendor/github.com/go-task/slim-sprig/v3/list.go
@@ -0,0 +1,464 @@
+package sprig
+
+import (
+ "fmt"
+ "math"
+ "reflect"
+ "sort"
+)
+
+// Reflection is used in these functions so that slices and arrays of strings,
+// ints, and other types not implementing []interface{} can be worked with.
+// For example, this is useful if you need to work on the output of regexs.
+
+func list(v ...interface{}) []interface{} {
+ return v
+}
+
+func push(list interface{}, v interface{}) []interface{} {
+ l, err := mustPush(list, v)
+ if err != nil {
+ panic(err)
+ }
+
+ return l
+}
+
+func mustPush(list interface{}, v interface{}) ([]interface{}, error) {
+ tp := reflect.TypeOf(list).Kind()
+ switch tp {
+ case reflect.Slice, reflect.Array:
+ l2 := reflect.ValueOf(list)
+
+ l := l2.Len()
+ nl := make([]interface{}, l)
+ for i := 0; i < l; i++ {
+ nl[i] = l2.Index(i).Interface()
+ }
+
+ return append(nl, v), nil
+
+ default:
+ return nil, fmt.Errorf("Cannot push on type %s", tp)
+ }
+}
+
+func prepend(list interface{}, v interface{}) []interface{} {
+ l, err := mustPrepend(list, v)
+ if err != nil {
+ panic(err)
+ }
+
+ return l
+}
+
+func mustPrepend(list interface{}, v interface{}) ([]interface{}, error) {
+ //return append([]interface{}{v}, list...)
+
+ tp := reflect.TypeOf(list).Kind()
+ switch tp {
+ case reflect.Slice, reflect.Array:
+ l2 := reflect.ValueOf(list)
+
+ l := l2.Len()
+ nl := make([]interface{}, l)
+ for i := 0; i < l; i++ {
+ nl[i] = l2.Index(i).Interface()
+ }
+
+ return append([]interface{}{v}, nl...), nil
+
+ default:
+ return nil, fmt.Errorf("Cannot prepend on type %s", tp)
+ }
+}
+
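+// chunk splits a list into groups of the given size; the final group holds
+// the remainder, e.g. chunking a 7-item list by 3 yields lengths 3, 3, 1.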
+func chunk(size int, list interface{}) [][]interface{} {
+ l, err := mustChunk(size, list)
+ if err != nil {
+ panic(err)
+ }
+
+ return l
+}
+
+func mustChunk(size int, list interface{}) ([][]interface{}, error) {
+ tp := reflect.TypeOf(list).Kind()
+ switch tp {
+ case reflect.Slice, reflect.Array:
+ l2 := reflect.ValueOf(list)
+
+ l := l2.Len()
+
+ cs := int(math.Floor(float64(l-1)/float64(size)) + 1)
+ nl := make([][]interface{}, cs)
+
+ for i := 0; i < cs; i++ {
+ clen := size
+ if i == cs-1 {
+ clen = int(math.Floor(math.Mod(float64(l), float64(size))))
+ if clen == 0 {
+ clen = size
+ }
+ }
+
+ nl[i] = make([]interface{}, clen)
+
+ for j := 0; j < clen; j++ {
+ ix := i*size + j
+ nl[i][j] = l2.Index(ix).Interface()
+ }
+ }
+
+ return nl, nil
+
+ default:
+ return nil, fmt.Errorf("Cannot chunk type %s", tp)
+ }
+}
+
+func last(list interface{}) interface{} {
+ l, err := mustLast(list)
+ if err != nil {
+ panic(err)
+ }
+
+ return l
+}
+
+func mustLast(list interface{}) (interface{}, error) {
+ tp := reflect.TypeOf(list).Kind()
+ switch tp {
+ case reflect.Slice, reflect.Array:
+ l2 := reflect.ValueOf(list)
+
+ l := l2.Len()
+ if l == 0 {
+ return nil, nil
+ }
+
+ return l2.Index(l - 1).Interface(), nil
+ default:
+ return nil, fmt.Errorf("Cannot find last on type %s", tp)
+ }
+}
+
+func first(list interface{}) interface{} {
+ l, err := mustFirst(list)
+ if err != nil {
+ panic(err)
+ }
+
+ return l
+}
+
+func mustFirst(list interface{}) (interface{}, error) {
+ tp := reflect.TypeOf(list).Kind()
+ switch tp {
+ case reflect.Slice, reflect.Array:
+ l2 := reflect.ValueOf(list)
+
+ l := l2.Len()
+ if l == 0 {
+ return nil, nil
+ }
+
+ return l2.Index(0).Interface(), nil
+ default:
+ return nil, fmt.Errorf("Cannot find first on type %s", tp)
+ }
+}
+
+func rest(list interface{}) []interface{} {
+ l, err := mustRest(list)
+ if err != nil {
+ panic(err)
+ }
+
+ return l
+}
+
+func mustRest(list interface{}) ([]interface{}, error) {
+ tp := reflect.TypeOf(list).Kind()
+ switch tp {
+ case reflect.Slice, reflect.Array:
+ l2 := reflect.ValueOf(list)
+
+ l := l2.Len()
+ if l == 0 {
+ return nil, nil
+ }
+
+ nl := make([]interface{}, l-1)
+ for i := 1; i < l; i++ {
+ nl[i-1] = l2.Index(i).Interface()
+ }
+
+ return nl, nil
+ default:
+ return nil, fmt.Errorf("Cannot find rest on type %s", tp)
+ }
+}
+
+func initial(list interface{}) []interface{} {
+ l, err := mustInitial(list)
+ if err != nil {
+ panic(err)
+ }
+
+ return l
+}
+
+func mustInitial(list interface{}) ([]interface{}, error) {
+ tp := reflect.TypeOf(list).Kind()
+ switch tp {
+ case reflect.Slice, reflect.Array:
+ l2 := reflect.ValueOf(list)
+
+ l := l2.Len()
+ if l == 0 {
+ return nil, nil
+ }
+
+ nl := make([]interface{}, l-1)
+ for i := 0; i < l-1; i++ {
+ nl[i] = l2.Index(i).Interface()
+ }
+
+ return nl, nil
+ default:
+ return nil, fmt.Errorf("Cannot find initial on type %s", tp)
+ }
+}
+
+func sortAlpha(list interface{}) []string {
+ k := reflect.Indirect(reflect.ValueOf(list)).Kind()
+ switch k {
+ case reflect.Slice, reflect.Array:
+ a := strslice(list)
+ s := sort.StringSlice(a)
+ s.Sort()
+ return s
+ }
+ return []string{strval(list)}
+}
+
+func reverse(v interface{}) []interface{} {
+ l, err := mustReverse(v)
+ if err != nil {
+ panic(err)
+ }
+
+ return l
+}
+
+func mustReverse(v interface{}) ([]interface{}, error) {
+ tp := reflect.TypeOf(v).Kind()
+ switch tp {
+ case reflect.Slice, reflect.Array:
+ l2 := reflect.ValueOf(v)
+
+ l := l2.Len()
+		// We do not reverse in place because the incoming array should not be altered.
+ nl := make([]interface{}, l)
+ for i := 0; i < l; i++ {
+ nl[l-i-1] = l2.Index(i).Interface()
+ }
+
+ return nl, nil
+ default:
+ return nil, fmt.Errorf("Cannot find reverse on type %s", tp)
+ }
+}
+
+func compact(list interface{}) []interface{} {
+ l, err := mustCompact(list)
+ if err != nil {
+ panic(err)
+ }
+
+ return l
+}
+
+func mustCompact(list interface{}) ([]interface{}, error) {
+ tp := reflect.TypeOf(list).Kind()
+ switch tp {
+ case reflect.Slice, reflect.Array:
+ l2 := reflect.ValueOf(list)
+
+ l := l2.Len()
+ nl := []interface{}{}
+ var item interface{}
+ for i := 0; i < l; i++ {
+ item = l2.Index(i).Interface()
+ if !empty(item) {
+ nl = append(nl, item)
+ }
+ }
+
+ return nl, nil
+ default:
+ return nil, fmt.Errorf("Cannot compact on type %s", tp)
+ }
+}
+
+func uniq(list interface{}) []interface{} {
+ l, err := mustUniq(list)
+ if err != nil {
+ panic(err)
+ }
+
+ return l
+}
+
+func mustUniq(list interface{}) ([]interface{}, error) {
+ tp := reflect.TypeOf(list).Kind()
+ switch tp {
+ case reflect.Slice, reflect.Array:
+ l2 := reflect.ValueOf(list)
+
+ l := l2.Len()
+ dest := []interface{}{}
+ var item interface{}
+ for i := 0; i < l; i++ {
+ item = l2.Index(i).Interface()
+ if !inList(dest, item) {
+ dest = append(dest, item)
+ }
+ }
+
+ return dest, nil
+ default:
+ return nil, fmt.Errorf("Cannot find uniq on type %s", tp)
+ }
+}
+
+func inList(haystack []interface{}, needle interface{}) bool {
+ for _, h := range haystack {
+ if reflect.DeepEqual(needle, h) {
+ return true
+ }
+ }
+ return false
+}
+
+func without(list interface{}, omit ...interface{}) []interface{} {
+ l, err := mustWithout(list, omit...)
+ if err != nil {
+ panic(err)
+ }
+
+ return l
+}
+
+func mustWithout(list interface{}, omit ...interface{}) ([]interface{}, error) {
+ tp := reflect.TypeOf(list).Kind()
+ switch tp {
+ case reflect.Slice, reflect.Array:
+ l2 := reflect.ValueOf(list)
+
+ l := l2.Len()
+ res := []interface{}{}
+ var item interface{}
+ for i := 0; i < l; i++ {
+ item = l2.Index(i).Interface()
+ if !inList(omit, item) {
+ res = append(res, item)
+ }
+ }
+
+ return res, nil
+ default:
+ return nil, fmt.Errorf("Cannot find without on type %s", tp)
+ }
+}
+
+func has(needle interface{}, haystack interface{}) bool {
+ l, err := mustHas(needle, haystack)
+ if err != nil {
+ panic(err)
+ }
+
+ return l
+}
+
+func mustHas(needle interface{}, haystack interface{}) (bool, error) {
+ if haystack == nil {
+ return false, nil
+ }
+ tp := reflect.TypeOf(haystack).Kind()
+ switch tp {
+ case reflect.Slice, reflect.Array:
+ l2 := reflect.ValueOf(haystack)
+ var item interface{}
+ l := l2.Len()
+ for i := 0; i < l; i++ {
+ item = l2.Index(i).Interface()
+ if reflect.DeepEqual(needle, item) {
+ return true, nil
+ }
+ }
+
+ return false, nil
+ default:
+ return false, fmt.Errorf("Cannot find has on type %s", tp)
+ }
+}
+
+// $list := [1, 2, 3, 4, 5]
+// slice $list -> list[0:5] = list[:]
+// slice $list 0 3 -> list[0:3] = list[:3]
+// slice $list 3 5 -> list[3:5]
+// slice $list 3 -> list[3:5] = list[3:]
+func slice(list interface{}, indices ...interface{}) interface{} {
+ l, err := mustSlice(list, indices...)
+ if err != nil {
+ panic(err)
+ }
+
+ return l
+}
+
+func mustSlice(list interface{}, indices ...interface{}) (interface{}, error) {
+ tp := reflect.TypeOf(list).Kind()
+ switch tp {
+ case reflect.Slice, reflect.Array:
+ l2 := reflect.ValueOf(list)
+
+ l := l2.Len()
+ if l == 0 {
+ return nil, nil
+ }
+
+ var start, end int
+ if len(indices) > 0 {
+ start = toInt(indices[0])
+ }
+ if len(indices) < 2 {
+ end = l
+ } else {
+ end = toInt(indices[1])
+ }
+
+ return l2.Slice(start, end).Interface(), nil
+ default:
+ return nil, fmt.Errorf("list should be type of slice or array but %s", tp)
+ }
+}
+
+func concat(lists ...interface{}) interface{} {
+ var res []interface{}
+ for _, list := range lists {
+ tp := reflect.TypeOf(list).Kind()
+ switch tp {
+ case reflect.Slice, reflect.Array:
+ l2 := reflect.ValueOf(list)
+ for i := 0; i < l2.Len(); i++ {
+ res = append(res, l2.Index(i).Interface())
+ }
+ default:
+ panic(fmt.Sprintf("Cannot concat type %s as list", tp))
+ }
+ }
+ return res
+}
diff --git a/vendor/github.com/go-task/slim-sprig/v3/network.go b/vendor/github.com/go-task/slim-sprig/v3/network.go
new file mode 100644
index 000000000..108d78a94
--- /dev/null
+++ b/vendor/github.com/go-task/slim-sprig/v3/network.go
@@ -0,0 +1,12 @@
+package sprig
+
+import (
+ "math/rand"
+ "net"
+)
+
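+// getHostByName resolves a host name and returns one of its addresses at
+// random; note that it panics if the lookup fails or returns no addresses.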
+func getHostByName(name string) string {
+ addrs, _ := net.LookupHost(name)
+	//TODO: add error handling when release v3 comes out
+ return addrs[rand.Intn(len(addrs))]
+}
diff --git a/vendor/github.com/go-task/slim-sprig/v3/numeric.go b/vendor/github.com/go-task/slim-sprig/v3/numeric.go
new file mode 100644
index 000000000..98cbb37a1
--- /dev/null
+++ b/vendor/github.com/go-task/slim-sprig/v3/numeric.go
@@ -0,0 +1,228 @@
+package sprig
+
+import (
+ "fmt"
+ "math"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+// toFloat64 converts the given value to a 64-bit float, returning 0 on failure
+func toFloat64(v interface{}) float64 {
+ if str, ok := v.(string); ok {
+ iv, err := strconv.ParseFloat(str, 64)
+ if err != nil {
+ return 0
+ }
+ return iv
+ }
+
+ val := reflect.Indirect(reflect.ValueOf(v))
+ switch val.Kind() {
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ return float64(val.Int())
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32:
+ return float64(val.Uint())
+ case reflect.Uint, reflect.Uint64:
+ return float64(val.Uint())
+ case reflect.Float32, reflect.Float64:
+ return val.Float()
+ case reflect.Bool:
+ if val.Bool() {
+ return 1
+ }
+ return 0
+ default:
+ return 0
+ }
+}
+
+func toInt(v interface{}) int {
+	// It's not optimal, but I don't want to duplicate the toInt64 code.
+ return int(toInt64(v))
+}
+
+// toInt64 converts the given value to a 64-bit integer, returning 0 on failure
+func toInt64(v interface{}) int64 {
+ if str, ok := v.(string); ok {
+ iv, err := strconv.ParseInt(str, 10, 64)
+ if err != nil {
+ return 0
+ }
+ return iv
+ }
+
+ val := reflect.Indirect(reflect.ValueOf(v))
+ switch val.Kind() {
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ return val.Int()
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32:
+ return int64(val.Uint())
+ case reflect.Uint, reflect.Uint64:
+ tv := val.Uint()
+ if tv <= math.MaxInt64 {
+ return int64(tv)
+ }
+ // TODO: What is the sensible thing to do here?
+ return math.MaxInt64
+ case reflect.Float32, reflect.Float64:
+ return int64(val.Float())
+ case reflect.Bool:
+ if val.Bool() {
+ return 1
+ }
+ return 0
+ default:
+ return 0
+ }
+}
+
+func max(a interface{}, i ...interface{}) int64 {
+ aa := toInt64(a)
+ for _, b := range i {
+ bb := toInt64(b)
+ if bb > aa {
+ aa = bb
+ }
+ }
+ return aa
+}
+
+func maxf(a interface{}, i ...interface{}) float64 {
+ aa := toFloat64(a)
+ for _, b := range i {
+ bb := toFloat64(b)
+ aa = math.Max(aa, bb)
+ }
+ return aa
+}
+
+func min(a interface{}, i ...interface{}) int64 {
+ aa := toInt64(a)
+ for _, b := range i {
+ bb := toInt64(b)
+ if bb < aa {
+ aa = bb
+ }
+ }
+ return aa
+}
+
+func minf(a interface{}, i ...interface{}) float64 {
+ aa := toFloat64(a)
+ for _, b := range i {
+ bb := toFloat64(b)
+ aa = math.Min(aa, bb)
+ }
+ return aa
+}
+
+func until(count int) []int {
+ step := 1
+ if count < 0 {
+ step = -1
+ }
+ return untilStep(0, count, step)
+}
+
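+// untilStep generates the int range [start, stop) advancing by step, much
+// like Python's range(); it returns an empty slice when step is zero or
+// points away from stop.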
+func untilStep(start, stop, step int) []int {
+ v := []int{}
+
+ if stop < start {
+ if step >= 0 {
+ return v
+ }
+ for i := start; i > stop; i += step {
+ v = append(v, i)
+ }
+ return v
+ }
+
+ if step <= 0 {
+ return v
+ }
+ for i := start; i < stop; i += step {
+ v = append(v, i)
+ }
+ return v
+}
+
+func floor(a interface{}) float64 {
+ aa := toFloat64(a)
+ return math.Floor(aa)
+}
+
+func ceil(a interface{}) float64 {
+ aa := toFloat64(a)
+ return math.Ceil(aa)
+}
+
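+// round rounds a to p decimal places; an optional third argument sets the
+// rounding threshold (default .5), e.g. round(123.4567, 2) yields 123.46.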
+func round(a interface{}, p int, rOpt ...float64) float64 {
+ roundOn := .5
+ if len(rOpt) > 0 {
+ roundOn = rOpt[0]
+ }
+ val := toFloat64(a)
+ places := toFloat64(p)
+
+ var round float64
+ pow := math.Pow(10, places)
+ digit := pow * val
+ _, div := math.Modf(digit)
+ if div >= roundOn {
+ round = math.Ceil(digit)
+ } else {
+ round = math.Floor(digit)
+ }
+ return round / pow
+}
+
+// toDecimal converts a unix octal representation (e.g. a file mode) to decimal
+func toDecimal(v interface{}) int64 {
+ result, err := strconv.ParseInt(fmt.Sprint(v), 8, 64)
+ if err != nil {
+ return 0
+ }
+ return result
+}
+
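+// seq mimics the GNU seq command for templates: seq(end), seq(start, end),
+// or seq(start, step, end), returning the numbers joined by spaces,
+// e.g. seq(1, 5) yields "1 2 3 4 5".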
+func seq(params ...int) string {
+ increment := 1
+ switch len(params) {
+ case 0:
+ return ""
+ case 1:
+ start := 1
+ end := params[0]
+ if end < start {
+ increment = -1
+ }
+ return intArrayToString(untilStep(start, end+increment, increment), " ")
+ case 3:
+ start := params[0]
+ end := params[2]
+ step := params[1]
+ if end < start {
+ increment = -1
+ if step > 0 {
+ return ""
+ }
+ }
+ return intArrayToString(untilStep(start, end+increment, step), " ")
+ case 2:
+ start := params[0]
+ end := params[1]
+ step := 1
+ if end < start {
+ step = -1
+ }
+ return intArrayToString(untilStep(start, end+step, step), " ")
+ default:
+ return ""
+ }
+}
+
+func intArrayToString(slice []int, delimiter string) string {
+	return strings.Trim(strings.Join(strings.Fields(fmt.Sprint(slice)), delimiter), "[]")
+}
diff --git a/vendor/github.com/go-task/slim-sprig/v3/reflect.go b/vendor/github.com/go-task/slim-sprig/v3/reflect.go
new file mode 100644
index 000000000..8a65c132f
--- /dev/null
+++ b/vendor/github.com/go-task/slim-sprig/v3/reflect.go
@@ -0,0 +1,28 @@
+package sprig
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// typeIs returns true if the src is the type named in target.
+func typeIs(target string, src interface{}) bool {
+ return target == typeOf(src)
+}
+
+func typeIsLike(target string, src interface{}) bool {
+ t := typeOf(src)
+ return target == t || "*"+target == t
+}
+
+func typeOf(src interface{}) string {
+ return fmt.Sprintf("%T", src)
+}
+
+func kindIs(target string, src interface{}) bool {
+ return target == kindOf(src)
+}
+
+func kindOf(src interface{}) string {
+ return reflect.ValueOf(src).Kind().String()
+}
diff --git a/vendor/github.com/go-task/slim-sprig/v3/regex.go b/vendor/github.com/go-task/slim-sprig/v3/regex.go
new file mode 100644
index 000000000..fab551018
--- /dev/null
+++ b/vendor/github.com/go-task/slim-sprig/v3/regex.go
@@ -0,0 +1,83 @@
+package sprig
+
+import (
+ "regexp"
+)
+
+func regexMatch(regex string, s string) bool {
+ match, _ := regexp.MatchString(regex, s)
+ return match
+}
+
+func mustRegexMatch(regex string, s string) (bool, error) {
+ return regexp.MatchString(regex, s)
+}
+
+func regexFindAll(regex string, s string, n int) []string {
+ r := regexp.MustCompile(regex)
+ return r.FindAllString(s, n)
+}
+
+func mustRegexFindAll(regex string, s string, n int) ([]string, error) {
+ r, err := regexp.Compile(regex)
+ if err != nil {
+ return []string{}, err
+ }
+ return r.FindAllString(s, n), nil
+}
+
+func regexFind(regex string, s string) string {
+ r := regexp.MustCompile(regex)
+ return r.FindString(s)
+}
+
+func mustRegexFind(regex string, s string) (string, error) {
+ r, err := regexp.Compile(regex)
+ if err != nil {
+ return "", err
+ }
+ return r.FindString(s), nil
+}
+
+func regexReplaceAll(regex string, s string, repl string) string {
+ r := regexp.MustCompile(regex)
+ return r.ReplaceAllString(s, repl)
+}
+
+func mustRegexReplaceAll(regex string, s string, repl string) (string, error) {
+ r, err := regexp.Compile(regex)
+ if err != nil {
+ return "", err
+ }
+ return r.ReplaceAllString(s, repl), nil
+}
+
+func regexReplaceAllLiteral(regex string, s string, repl string) string {
+ r := regexp.MustCompile(regex)
+ return r.ReplaceAllLiteralString(s, repl)
+}
+
+func mustRegexReplaceAllLiteral(regex string, s string, repl string) (string, error) {
+ r, err := regexp.Compile(regex)
+ if err != nil {
+ return "", err
+ }
+ return r.ReplaceAllLiteralString(s, repl), nil
+}
+
+func regexSplit(regex string, s string, n int) []string {
+ r := regexp.MustCompile(regex)
+ return r.Split(s, n)
+}
+
+func mustRegexSplit(regex string, s string, n int) ([]string, error) {
+ r, err := regexp.Compile(regex)
+ if err != nil {
+ return []string{}, err
+ }
+ return r.Split(s, n), nil
+}
+
+func regexQuoteMeta(s string) string {
+ return regexp.QuoteMeta(s)
+}
diff --git a/vendor/github.com/go-task/slim-sprig/v3/strings.go b/vendor/github.com/go-task/slim-sprig/v3/strings.go
new file mode 100644
index 000000000..3c62d6b6f
--- /dev/null
+++ b/vendor/github.com/go-task/slim-sprig/v3/strings.go
@@ -0,0 +1,189 @@
+package sprig
+
+import (
+ "encoding/base32"
+ "encoding/base64"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+func base64encode(v string) string {
+ return base64.StdEncoding.EncodeToString([]byte(v))
+}
+
+func base64decode(v string) string {
+ data, err := base64.StdEncoding.DecodeString(v)
+ if err != nil {
+ return err.Error()
+ }
+ return string(data)
+}
+
+func base32encode(v string) string {
+ return base32.StdEncoding.EncodeToString([]byte(v))
+}
+
+func base32decode(v string) string {
+ data, err := base32.StdEncoding.DecodeString(v)
+ if err != nil {
+ return err.Error()
+ }
+ return string(data)
+}
+
+func quote(str ...interface{}) string {
+ out := make([]string, 0, len(str))
+ for _, s := range str {
+ if s != nil {
+ out = append(out, fmt.Sprintf("%q", strval(s)))
+ }
+ }
+ return strings.Join(out, " ")
+}
+
+func squote(str ...interface{}) string {
+ out := make([]string, 0, len(str))
+ for _, s := range str {
+ if s != nil {
+ out = append(out, fmt.Sprintf("'%v'", s))
+ }
+ }
+ return strings.Join(out, " ")
+}
+
+func cat(v ...interface{}) string {
+ v = removeNilElements(v)
+ r := strings.TrimSpace(strings.Repeat("%v ", len(v)))
+ return fmt.Sprintf(r, v...)
+}
+
+func indent(spaces int, v string) string {
+ pad := strings.Repeat(" ", spaces)
+ return pad + strings.Replace(v, "\n", "\n"+pad, -1)
+}
+
+func nindent(spaces int, v string) string {
+ return "\n" + indent(spaces, v)
+}
+
+func replace(old, new, src string) string {
+ return strings.Replace(src, old, new, -1)
+}
+
+func plural(one, many string, count int) string {
+ if count == 1 {
+ return one
+ }
+ return many
+}
+
+func strslice(v interface{}) []string {
+ switch v := v.(type) {
+ case []string:
+ return v
+ case []interface{}:
+ b := make([]string, 0, len(v))
+ for _, s := range v {
+ if s != nil {
+ b = append(b, strval(s))
+ }
+ }
+ return b
+ default:
+ val := reflect.ValueOf(v)
+ switch val.Kind() {
+ case reflect.Array, reflect.Slice:
+ l := val.Len()
+ b := make([]string, 0, l)
+ for i := 0; i < l; i++ {
+ value := val.Index(i).Interface()
+ if value != nil {
+ b = append(b, strval(value))
+ }
+ }
+ return b
+ default:
+ if v == nil {
+ return []string{}
+ }
+
+ return []string{strval(v)}
+ }
+ }
+}
+
+func removeNilElements(v []interface{}) []interface{} {
+ newSlice := make([]interface{}, 0, len(v))
+ for _, i := range v {
+ if i != nil {
+ newSlice = append(newSlice, i)
+ }
+ }
+ return newSlice
+}
+
+func strval(v interface{}) string {
+ switch v := v.(type) {
+ case string:
+ return v
+ case []byte:
+ return string(v)
+ case error:
+ return v.Error()
+ case fmt.Stringer:
+ return v.String()
+ default:
+ return fmt.Sprintf("%v", v)
+ }
+}
+
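+// trunc truncates a string to c characters; a negative c keeps the last -c
+// characters instead, e.g. trunc(-3, "hello") yields "llo".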
+func trunc(c int, s string) string {
+ if c < 0 && len(s)+c > 0 {
+ return s[len(s)+c:]
+ }
+ if c >= 0 && len(s) > c {
+ return s[:c]
+ }
+ return s
+}
+
+func join(sep string, v interface{}) string {
+ return strings.Join(strslice(v), sep)
+}
+
+func split(sep, orig string) map[string]string {
+ parts := strings.Split(orig, sep)
+ res := make(map[string]string, len(parts))
+ for i, v := range parts {
+ res["_"+strconv.Itoa(i)] = v
+ }
+ return res
+}
+
+func splitn(sep string, n int, orig string) map[string]string {
+ parts := strings.SplitN(orig, sep, n)
+ res := make(map[string]string, len(parts))
+ for i, v := range parts {
+ res["_"+strconv.Itoa(i)] = v
+ }
+ return res
+}
+
+// substring creates a substring of the given string.
+//
+// If start < 0, this returns s[:end].
+//
+// If start >= 0 and end < 0 or end > len(s), this returns s[start:].
+//
+// Otherwise, this returns s[start:end].
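+//
+// For example, via its registered template name "substr":
+//
+//	{{ "hello" | substr 0 3 }}  renders "hel"
+//	{{ "hello" | substr 2 -1 }} renders "llo"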
+func substring(start, end int, s string) string {
+ if start < 0 {
+ return s[:end]
+ }
+ if end < 0 || end > len(s) {
+ return s[start:]
+ }
+ return s[start:end]
+}
diff --git a/vendor/github.com/go-task/slim-sprig/v3/url.go b/vendor/github.com/go-task/slim-sprig/v3/url.go
new file mode 100644
index 000000000..b8e120e19
--- /dev/null
+++ b/vendor/github.com/go-task/slim-sprig/v3/url.go
@@ -0,0 +1,66 @@
+package sprig
+
+import (
+ "fmt"
+ "net/url"
+ "reflect"
+)
+
+func dictGetOrEmpty(dict map[string]interface{}, key string) string {
+ value, ok := dict[key]
+ if !ok {
+ return ""
+ }
+ tp := reflect.TypeOf(value).Kind()
+ if tp != reflect.String {
+ panic(fmt.Sprintf("unable to parse %s key, must be of type string, but %s found", key, tp.String()))
+ }
+ return reflect.ValueOf(value).String()
+}
+
+// urlParse parses the given URL and returns its components as a dict
+func urlParse(v string) map[string]interface{} {
+ dict := map[string]interface{}{}
+ parsedURL, err := url.Parse(v)
+ if err != nil {
+ panic(fmt.Sprintf("unable to parse url: %s", err))
+ }
+ dict["scheme"] = parsedURL.Scheme
+ dict["host"] = parsedURL.Host
+ dict["hostname"] = parsedURL.Hostname()
+ dict["path"] = parsedURL.Path
+ dict["query"] = parsedURL.RawQuery
+ dict["opaque"] = parsedURL.Opaque
+ dict["fragment"] = parsedURL.Fragment
+ if parsedURL.User != nil {
+ dict["userinfo"] = parsedURL.User.String()
+ } else {
+ dict["userinfo"] = ""
+ }
+
+ return dict
+}
+
+// urlJoin assembles a URL string from the components of the given dict
+func urlJoin(d map[string]interface{}) string {
+ resURL := url.URL{
+ Scheme: dictGetOrEmpty(d, "scheme"),
+ Host: dictGetOrEmpty(d, "host"),
+ Path: dictGetOrEmpty(d, "path"),
+ RawQuery: dictGetOrEmpty(d, "query"),
+ Opaque: dictGetOrEmpty(d, "opaque"),
+ Fragment: dictGetOrEmpty(d, "fragment"),
+ }
+ userinfo := dictGetOrEmpty(d, "userinfo")
+ var user *url.Userinfo
+ if userinfo != "" {
+ tempURL, err := url.Parse(fmt.Sprintf("proto://%s@host", userinfo))
+ if err != nil {
+ panic(fmt.Sprintf("unable to parse userinfo in dict: %s", err))
+ }
+ user = tempURL.User
+ }
+
+ resURL.User = user
+ return resURL.String()
+}
diff --git a/vendor/github.com/google/cel-go/LICENSE b/vendor/github.com/google/cel-go/LICENSE
new file mode 100644
index 000000000..2493ed2eb
--- /dev/null
+++ b/vendor/github.com/google/cel-go/LICENSE
@@ -0,0 +1,233 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+===========================================================================
+The common/types/pb/equal.go modification of proto.Equal logic
+===========================================================================
+Copyright (c) 2018 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/google/cel-go/cel/BUILD.bazel b/vendor/github.com/google/cel-go/cel/BUILD.bazel
new file mode 100644
index 000000000..81549fb4c
--- /dev/null
+++ b/vendor/github.com/google/cel-go/cel/BUILD.bazel
@@ -0,0 +1,91 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(
+ licenses = ["notice"], # Apache 2.0
+)
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "cel.go",
+ "decls.go",
+ "env.go",
+ "folding.go",
+ "io.go",
+ "inlining.go",
+ "library.go",
+ "macro.go",
+ "optimizer.go",
+ "options.go",
+ "program.go",
+ "validator.go",
+ ],
+ importpath = "github.com/google/cel-go/cel",
+ visibility = ["//visibility:public"],
+ deps = [
+ "//checker:go_default_library",
+ "//checker/decls:go_default_library",
+ "//common:go_default_library",
+ "//common/ast:go_default_library",
+ "//common/containers:go_default_library",
+ "//common/decls:go_default_library",
+ "//common/functions:go_default_library",
+ "//common/operators:go_default_library",
+ "//common/overloads:go_default_library",
+ "//common/stdlib:go_default_library",
+ "//common/types:go_default_library",
+ "//common/types/pb:go_default_library",
+ "//common/types/ref:go_default_library",
+ "//common/types/traits:go_default_library",
+ "//interpreter:go_default_library",
+ "//parser:go_default_library",
+ "@dev_cel_expr//:expr",
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
+ "@org_golang_google_protobuf//proto:go_default_library",
+ "@org_golang_google_protobuf//reflect/protodesc:go_default_library",
+ "@org_golang_google_protobuf//reflect/protoreflect:go_default_library",
+ "@org_golang_google_protobuf//reflect/protoregistry:go_default_library",
+ "@org_golang_google_protobuf//types/descriptorpb:go_default_library",
+ "@org_golang_google_protobuf//types/dynamicpb:go_default_library",
+ "@org_golang_google_protobuf//types/known/anypb:go_default_library",
+ "@org_golang_google_protobuf//types/known/durationpb:go_default_library",
+ "@org_golang_google_protobuf//types/known/timestamppb:go_default_library",
+ ],
+)
+
+go_test(
+ name = "go_default_test",
+ srcs = [
+ "cel_example_test.go",
+ "cel_test.go",
+ "decls_test.go",
+ "env_test.go",
+ "folding_test.go",
+ "io_test.go",
+ "inlining_test.go",
+ "optimizer_test.go",
+ "validator_test.go",
+ ],
+ data = [
+ "//cel/testdata:gen_test_fds",
+ ],
+ embed = [
+ ":go_default_library",
+ ],
+ deps = [
+ "//common/operators:go_default_library",
+ "//common/overloads:go_default_library",
+ "//common/types:go_default_library",
+ "//common/types/ref:go_default_library",
+ "//common/types/traits:go_default_library",
+ "//ext:go_default_library",
+ "//test:go_default_library",
+ "//test/proto2pb:go_default_library",
+ "//test/proto3pb:go_default_library",
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
+ "@org_golang_google_protobuf//proto:go_default_library",
+ "@org_golang_google_protobuf//encoding/prototext:go_default_library",
+ "@org_golang_google_protobuf//types/known/structpb:go_default_library",
+ "@org_golang_google_protobuf//types/known/wrapperspb:go_default_library",
+ ],
+)
diff --git a/vendor/github.com/google/cel-go/cel/cel.go b/vendor/github.com/google/cel-go/cel/cel.go
new file mode 100644
index 000000000..eb5a9f4cc
--- /dev/null
+++ b/vendor/github.com/google/cel-go/cel/cel.go
@@ -0,0 +1,19 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package cel defines the top-level interface for the Common Expression Language (CEL).
+//
+// CEL is a non-Turing complete expression language designed to parse, check, and evaluate
+// expressions against user-defined environments.
+package cel
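
For reviewers unfamiliar with cel-go: the parse, check, and evaluate flow that the package comment describes looks roughly like the sketch below. This is illustrative only and not part of the vendored sources; the variable name, expression, and input map are assumptions.

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
)

func main() {
	// Declare an environment with a single string variable, "name" (illustrative).
	env, err := cel.NewEnv(cel.Variable("name", cel.StringType))
	if err != nil {
		panic(err)
	}
	// Compile runs the Parse and Check phases together.
	ast, iss := env.Compile(`"hello " + name`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	// Plan an evaluable program, then evaluate it against an input map.
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}
	out, _, err := prg.Eval(map[string]any{"name": "world"})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // hello world
}
```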
diff --git a/vendor/github.com/google/cel-go/cel/decls.go b/vendor/github.com/google/cel-go/cel/decls.go
new file mode 100644
index 000000000..418806021
--- /dev/null
+++ b/vendor/github.com/google/cel-go/cel/decls.go
@@ -0,0 +1,370 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cel
+
+import (
+ "fmt"
+
+ "github.com/google/cel-go/common/ast"
+ "github.com/google/cel-go/common/decls"
+ "github.com/google/cel-go/common/functions"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+
+ celpb "cel.dev/expr"
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+// Kind indicates a CEL type's kind which is used to differentiate quickly between simple and complex types.
+type Kind = types.Kind
+
+const (
+ // DynKind represents a dynamic type. This kind only exists at type-check time.
+ DynKind Kind = types.DynKind
+
+ // AnyKind represents a google.protobuf.Any type. This kind only exists at type-check time.
+ AnyKind = types.AnyKind
+
+ // BoolKind represents a boolean type.
+ BoolKind = types.BoolKind
+
+ // BytesKind represents a bytes type.
+ BytesKind = types.BytesKind
+
+ // DoubleKind represents a double type.
+ DoubleKind = types.DoubleKind
+
+ // DurationKind represents a CEL duration type.
+ DurationKind = types.DurationKind
+
+ // IntKind represents an integer type.
+ IntKind = types.IntKind
+
+ // ListKind represents a list type.
+ ListKind = types.ListKind
+
+ // MapKind represents a map type.
+ MapKind = types.MapKind
+
+ // NullTypeKind represents a null type.
+ NullTypeKind = types.NullTypeKind
+
+ // OpaqueKind represents an abstract type which has no accessible fields.
+ OpaqueKind = types.OpaqueKind
+
+ // StringKind represents a string type.
+ StringKind = types.StringKind
+
+ // StructKind represents a structured object with typed fields.
+ StructKind = types.StructKind
+
+ // TimestampKind represents a CEL time type.
+ TimestampKind = types.TimestampKind
+
+ // TypeKind represents the CEL type.
+ TypeKind = types.TypeKind
+
+ // TypeParamKind represents a parameterized type whose type name will be resolved at type-check time, if possible.
+ TypeParamKind = types.TypeParamKind
+
+ // UintKind represents a uint type.
+ UintKind = types.UintKind
+)
+
+var (
+ // AnyType represents the google.protobuf.Any type.
+ AnyType = types.AnyType
+ // BoolType represents the bool type.
+ BoolType = types.BoolType
+ // BytesType represents the bytes type.
+ BytesType = types.BytesType
+ // DoubleType represents the double type.
+ DoubleType = types.DoubleType
+ // DurationType represents the CEL duration type.
+ DurationType = types.DurationType
+ // DynType represents a dynamic CEL type whose type will be determined at runtime from context.
+ DynType = types.DynType
+ // IntType represents the int type.
+ IntType = types.IntType
+ // NullType represents the type of a null value.
+ NullType = types.NullType
+ // StringType represents the string type.
+ StringType = types.StringType
+ // TimestampType represents the time type.
+ TimestampType = types.TimestampType
+ // TypeType represents a CEL type
+ TypeType = types.TypeType
+ // UintType represents a uint type.
+ UintType = types.UintType
+
+ // function references for instantiating new types.
+
+ // ListType creates an instance of a list type value with the provided element type.
+ ListType = types.NewListType
+ // MapType creates an instance of a map type value with the provided key and value types.
+ MapType = types.NewMapType
+ // NullableType creates an instance of a nullable type with the provided wrapped type.
+ //
+ // Note: only primitive types are supported as wrapped types.
+ NullableType = types.NewNullableType
+ // OptionalType creates an abstract parameterized type instance corresponding to CEL's notion of optional.
+ OptionalType = types.NewOptionalType
+ // OpaqueType creates an abstract parameterized type with a given name.
+ OpaqueType = types.NewOpaqueType
+ // ObjectType creates a type reference to an externally defined type, e.g. a protobuf message type.
+ ObjectType = types.NewObjectType
+ // TypeParamType creates a parameterized type instance.
+ TypeParamType = types.NewTypeParamType
+)
+
+// Type holds a reference to a runtime type with an optional type-checked set of type parameters.
+type Type = types.Type
+
+// Constant creates an instance of an identifier declaration with a variable name, type, and value.
+func Constant(name string, t *Type, v ref.Val) EnvOption {
+ return func(e *Env) (*Env, error) {
+ e.variables = append(e.variables, decls.NewConstant(name, t, v))
+ return e, nil
+ }
+}
+
+// Variable creates an instance of a variable declaration with a variable name and type.
+func Variable(name string, t *Type) EnvOption {
+ return func(e *Env) (*Env, error) {
+ e.variables = append(e.variables, decls.NewVariable(name, t))
+ return e, nil
+ }
+}
+
+// Function defines a function and overloads with optional singleton or per-overload bindings.
+//
+// Using Function is roughly equivalent to calling Declarations() to declare the function signatures
+// and Functions() to define the function bindings, if they have been defined. Specifying the
+// same function name more than once will result in the aggregation of the function overloads. If any
+// signatures conflict between the existing and new function definition an error will be raised.
+// However, if the signatures are identical and the overload ids are the same, the redefinition will
+// be considered a no-op.
+//
+// One key difference with using Function() is that each FunctionDecl provided will handle dynamic
+// dispatch based on the type-signatures of the overloads provided which means overload resolution at
+// runtime is handled out of the box rather than via a custom binding for overload resolution via
+// Functions():
+//
+// - Overloads are searched in the order they are declared
+//
+// - Dynamic dispatch for lists and maps is limited by inspection of the list and map contents
+//   at runtime. Empty lists and maps will result in a 'default dispatch'
+//
+// - In the event that a default dispatch occurs, the first overload provided is the one invoked
+//
+// If you intend to use overloads which differentiate based on the key or element type of a list or
+// map, consider using a generic function instead: e.g. func(list(T)) or func(map(K, V)) as this
+// will allow your implementation to determine how best to handle dispatch and the default behavior
+// for empty lists and maps whose contents cannot be inspected.
+//
+// For functions which use parameterized opaque types (abstract types), consider using a singleton
+// function which is capable of inspecting the contents of the type and resolving the appropriate
+// overload as CEL can only make inferences by type-name regarding such types.
+func Function(name string, opts ...FunctionOpt) EnvOption {
+ return func(e *Env) (*Env, error) {
+ fn, err := decls.NewFunction(name, opts...)
+ if err != nil {
+ return nil, err
+ }
+ if existing, found := e.functions[fn.Name()]; found {
+ fn, err = existing.Merge(fn)
+ if err != nil {
+ return nil, err
+ }
+ }
+ e.functions[fn.Name()] = fn
+ return e, nil
+ }
+}
+
+// FunctionOpt defines a functional option for configuring a function declaration.
+type FunctionOpt = decls.FunctionOpt
+
+// SingletonUnaryBinding creates a singleton function definition to be used for all function overloads.
+//
+// Note, this approach works well if the operand is expected to have a specific trait which it implements,
+// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings.
+func SingletonUnaryBinding(fn functions.UnaryOp, traits ...int) FunctionOpt {
+ return decls.SingletonUnaryBinding(fn, traits...)
+}
+
+// SingletonBinaryImpl creates a singleton function definition to be used with all function overloads.
+//
+// Note, this approach works well if the operand is expected to have a specific trait which it implements,
+// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings.
+//
+// Deprecated: use SingletonBinaryBinding
+func SingletonBinaryImpl(fn functions.BinaryOp, traits ...int) FunctionOpt {
+ return decls.SingletonBinaryBinding(fn, traits...)
+}
+
+// SingletonBinaryBinding creates a singleton function definition to be used with all function overloads.
+//
+// Note, this approach works well if the operand is expected to have a specific trait which it implements,
+// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings.
+func SingletonBinaryBinding(fn functions.BinaryOp, traits ...int) FunctionOpt {
+ return decls.SingletonBinaryBinding(fn, traits...)
+}
+
+// SingletonFunctionImpl creates a singleton function definition to be used with all function overloads.
+//
+// Note, this approach works well if the operand is expected to have a specific trait which it implements,
+// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings.
+//
+// Deprecated: use SingletonFunctionBinding
+func SingletonFunctionImpl(fn functions.FunctionOp, traits ...int) FunctionOpt {
+ return decls.SingletonFunctionBinding(fn, traits...)
+}
+
+// SingletonFunctionBinding creates a singleton function definition to be used with all function overloads.
+//
+// Note, this approach works well if the operand is expected to have a specific trait which it implements,
+// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings.
+func SingletonFunctionBinding(fn functions.FunctionOp, traits ...int) FunctionOpt {
+ return decls.SingletonFunctionBinding(fn, traits...)
+}
+
+// DisableDeclaration disables the function signatures, effectively removing them from the type-check
+// environment while preserving the runtime bindings.
+func DisableDeclaration(value bool) FunctionOpt {
+ return decls.DisableDeclaration(value)
+}
+
+// Overload defines a new global overload with an overload id, argument types, and result type. Through the
+// use of OverloadOpt options, the overload may also be configured with a binding, an operand trait, and to
+// be non-strict.
+//
+// Note: function bindings should be commonly configured with Overload instances whereas operand traits and
+// strict-ness should be rare occurrences.
+func Overload(overloadID string, args []*Type, resultType *Type, opts ...OverloadOpt) FunctionOpt {
+ return decls.Overload(overloadID, args, resultType, opts...)
+}
+
+// MemberOverload defines a new receiver-style overload (or member function) with an overload id, argument types,
+// and result type. Through the use of OverloadOpt options, the overload may also be configured with a binding,
+// an operand trait, and to be non-strict.
+//
+// Note: function bindings should be commonly configured with Overload instances whereas operand traits and
+// strict-ness should be rare occurrences.
+func MemberOverload(overloadID string, args []*Type, resultType *Type, opts ...OverloadOpt) FunctionOpt {
+ return decls.MemberOverload(overloadID, args, resultType, opts...)
+}
+
+// OverloadOpt is a functional option for configuring a function overload.
+type OverloadOpt = decls.OverloadOpt
+
+// UnaryBinding provides the implementation of a unary overload. The provided function is protected by a runtime
+// type-guard which ensures runtime type agreement between the overload signature and runtime argument types.
+func UnaryBinding(binding functions.UnaryOp) OverloadOpt {
+ return decls.UnaryBinding(binding)
+}
+
+// BinaryBinding provides the implementation of a binary overload. The provided function is protected by a runtime
+// type-guard which ensures runtime type agreement between the overload signature and runtime argument types.
+func BinaryBinding(binding functions.BinaryOp) OverloadOpt {
+ return decls.BinaryBinding(binding)
+}
+
+// FunctionBinding provides the implementation of a variadic overload. The provided function is protected by a runtime
+// type-guard which ensures runtime type agreement between the overload signature and runtime argument types.
+func FunctionBinding(binding functions.FunctionOp) OverloadOpt {
+ return decls.FunctionBinding(binding)
+}
+
+// OverloadIsNonStrict enables the function to be called with error and unknown argument values.
+//
+// Note: do not use this option unless absolutely necessary as it should be an uncommon feature.
+func OverloadIsNonStrict() OverloadOpt {
+ return decls.OverloadIsNonStrict()
+}
+
+// OverloadOperandTrait configures a set of traits which the first argument to the overload must implement in order to be
+// successfully invoked.
+func OverloadOperandTrait(trait int) OverloadOpt {
+ return decls.OverloadOperandTrait(trait)
+}
+
+// TypeToExprType converts a CEL-native type representation to a protobuf CEL Type representation.
+func TypeToExprType(t *Type) (*exprpb.Type, error) {
+ return types.TypeToExprType(t)
+}
+
+// ExprTypeToType converts a protobuf CEL type representation to a CEL-native type representation.
+func ExprTypeToType(t *exprpb.Type) (*Type, error) {
+ return types.ExprTypeToType(t)
+}
+
+// ExprDeclToDeclaration converts a protobuf CEL declaration to a CEL-native declaration, either a Variable or Function.
+func ExprDeclToDeclaration(d *exprpb.Decl) (EnvOption, error) {
+ return AlphaProtoAsDeclaration(d)
+}
+
+// AlphaProtoAsDeclaration converts a v1alpha1.Decl value describing a variable or function into an EnvOption.
+func AlphaProtoAsDeclaration(d *exprpb.Decl) (EnvOption, error) {
+ canonical := &celpb.Decl{}
+ if err := convertProto(d, canonical); err != nil {
+ return nil, err
+ }
+ return ProtoAsDeclaration(canonical)
+}
+
+// ProtoAsDeclaration converts a canonical celpb.Decl value describing a variable or function into an EnvOption.
+func ProtoAsDeclaration(d *celpb.Decl) (EnvOption, error) {
+ switch d.GetDeclKind().(type) {
+ case *celpb.Decl_Function:
+ overloads := d.GetFunction().GetOverloads()
+ opts := make([]FunctionOpt, len(overloads))
+ for i, o := range overloads {
+ args := make([]*Type, len(o.GetParams()))
+ for j, p := range o.GetParams() {
+ a, err := types.ProtoAsType(p)
+ if err != nil {
+ return nil, err
+ }
+ args[j] = a
+ }
+ res, err := types.ProtoAsType(o.GetResultType())
+ if err != nil {
+ return nil, err
+ }
+ if o.IsInstanceFunction {
+ opts[i] = decls.MemberOverload(o.GetOverloadId(), args, res)
+ } else {
+ opts[i] = decls.Overload(o.GetOverloadId(), args, res)
+ }
+ }
+ return Function(d.GetName(), opts...), nil
+ case *celpb.Decl_Ident:
+ t, err := types.ProtoAsType(d.GetIdent().GetType())
+ if err != nil {
+ return nil, err
+ }
+ if d.GetIdent().GetValue() == nil {
+ return Variable(d.GetName(), t), nil
+ }
+ val, err := ast.ProtoConstantAsVal(d.GetIdent().GetValue())
+ if err != nil {
+ return nil, err
+ }
+ return Constant(d.GetName(), t, val), nil
+ default:
+ return nil, fmt.Errorf("unsupported decl: %v", d)
+ }
+}
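
The Function/Overload options above are the main extension point of this vendored API. Below is a minimal sketch of declaring and binding a custom function; the function name "shout" and overload id "shout_string" are illustrative assumptions, not anything defined in this diff.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/common/types"
	"github.com/google/cel-go/common/types/ref"
)

func main() {
	// Declare a global function "shout" with one string -> string overload,
	// bound via UnaryBinding and guarded by the overload's type signature.
	env, err := cel.NewEnv(
		cel.Function("shout",
			cel.Overload("shout_string",
				[]*cel.Type{cel.StringType}, cel.StringType,
				cel.UnaryBinding(func(arg ref.Val) ref.Val {
					s, ok := arg.Value().(string)
					if !ok {
						return types.NewErr("shout: expected string, got %T", arg.Value())
					}
					return types.String(strings.ToUpper(s))
				}),
			),
		),
	)
	if err != nil {
		panic(err)
	}
	ast, iss := env.Compile(`shout("cel")`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}
	out, _, err := prg.Eval(cel.NoVars())
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // CEL
}
```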
diff --git a/vendor/github.com/google/cel-go/cel/env.go b/vendor/github.com/google/cel-go/cel/env.go
new file mode 100644
index 000000000..ab736b776
--- /dev/null
+++ b/vendor/github.com/google/cel-go/cel/env.go
@@ -0,0 +1,894 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cel
+
+import (
+ "errors"
+ "sync"
+
+ "github.com/google/cel-go/checker"
+ chkdecls "github.com/google/cel-go/checker/decls"
+ "github.com/google/cel-go/common"
+ celast "github.com/google/cel-go/common/ast"
+ "github.com/google/cel-go/common/containers"
+ "github.com/google/cel-go/common/decls"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/interpreter"
+ "github.com/google/cel-go/parser"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+// Source interface representing a user-provided expression.
+type Source = common.Source
+
+// Ast representing the checked or unchecked expression, its source, and related metadata such as
+// source position information.
+type Ast struct {
+ source Source
+ impl *celast.AST
+}
+
+// NativeRep converts the AST to a Go-native representation.
+func (ast *Ast) NativeRep() *celast.AST {
+ if ast == nil {
+ return nil
+ }
+ return ast.impl
+}
+
+// Expr returns the proto serializable instance of the parsed/checked expression.
+//
+// Deprecated: prefer cel.AstToCheckedExpr() or cel.AstToParsedExpr() and call GetExpr()
+// on the result instead.
+func (ast *Ast) Expr() *exprpb.Expr {
+ if ast == nil {
+ return nil
+ }
+ pbExpr, _ := celast.ExprToProto(ast.NativeRep().Expr())
+ return pbExpr
+}
+
+// IsChecked returns whether the Ast value has been successfully type-checked.
+func (ast *Ast) IsChecked() bool {
+ return ast.NativeRep().IsChecked()
+}
+
+// SourceInfo returns character offset and newline position information about expression elements.
+func (ast *Ast) SourceInfo() *exprpb.SourceInfo {
+ if ast == nil {
+ return nil
+ }
+ pbInfo, _ := celast.SourceInfoToProto(ast.NativeRep().SourceInfo())
+ return pbInfo
+}
+
+// ResultType returns the output type of the expression if the Ast has been type-checked, else
+// returns chkdecls.Dyn as the parse step cannot infer the type.
+//
+// Deprecated: use OutputType
+func (ast *Ast) ResultType() *exprpb.Type {
+ out := ast.OutputType()
+ t, err := TypeToExprType(out)
+ if err != nil {
+ return chkdecls.Dyn
+ }
+ return t
+}
+
+// OutputType returns the output type of the expression if the Ast has been type-checked, else
+// returns cel.DynType as the parse step cannot infer types.
+func (ast *Ast) OutputType() *Type {
+ if ast == nil {
+ return types.ErrorType
+ }
+ return ast.NativeRep().GetType(ast.NativeRep().Expr().ID())
+}
+
+// Source returns a view of the input used to create the Ast. This source may be complete or
+// constructed from the SourceInfo.
+func (ast *Ast) Source() Source {
+ if ast == nil {
+ return nil
+ }
+ return ast.source
+}
+
+// FormatType converts a type message into a string representation.
+//
+// Deprecated: prefer FormatCELType
+func FormatType(t *exprpb.Type) string {
+ return checker.FormatCheckedType(t)
+}
+
+// FormatCELType formats a cel.Type value to a string representation.
+//
+// The type formatting is identical to FormatType.
+func FormatCELType(t *Type) string {
+ return checker.FormatCELType(t)
+}
+
+// Env encapsulates the context necessary to perform parsing, type checking, or generation of
+// evaluable programs for different expressions.
+type Env struct {
+ Container *containers.Container
+ variables []*decls.VariableDecl
+ functions map[string]*decls.FunctionDecl
+ macros []parser.Macro
+ adapter types.Adapter
+ provider types.Provider
+ features map[int]bool
+ appliedFeatures map[int]bool
+ libraries map[string]bool
+ validators []ASTValidator
+ costOptions []checker.CostOption
+
+ // Internal parser representation
+ prsr *parser.Parser
+ prsrOpts []parser.Option
+
+ // Internal checker representation
+ chkMutex sync.Mutex
+ chk *checker.Env
+ chkErr error
+ chkOnce sync.Once
+ chkOpts []checker.Option
+
+ // Program options tied to the environment
+ progOpts []ProgramOption
+}
+
+// NewEnv creates a program environment configured with the standard library of CEL functions and
+// macros. The Env value returned can parse and check any CEL program which builds upon the core
+// features documented in the CEL specification.
+//
+// See the EnvOption helper functions for the options that can be used to configure the
+// environment.
+func NewEnv(opts ...EnvOption) (*Env, error) {
+ // Extend the statically configured standard environment, disabling eager validation to ensure
+ // the cost of setup for the environment is still just as cheap as it is in v0.11.x and earlier
+ // releases. The user provided options can easily re-enable the eager validation as they are
+ // processed after this default option.
+ stdOpts := append([]EnvOption{EagerlyValidateDeclarations(false)}, opts...)
+ env, err := getStdEnv()
+ if err != nil {
+ return nil, err
+ }
+ return env.Extend(stdOpts...)
+}
+
+// NewCustomEnv creates a custom program environment which is not automatically configured with the
+// standard library of functions and macros documented in the CEL spec.
+//
+// The purpose for using a custom environment might be for subsetting the standard library produced
+// by the cel.StdLib() function. Subsetting CEL is a core aspect of its design that allows users to
+// limit the compute and memory impact of a CEL program by controlling the functions and macros
+// that may appear in a given expression.
+//
+// See the EnvOption helper functions for the options that can be used to configure the
+// environment.
+func NewCustomEnv(opts ...EnvOption) (*Env, error) {
+ registry, err := types.NewRegistry()
+ if err != nil {
+ return nil, err
+ }
+ return (&Env{
+ variables: []*decls.VariableDecl{},
+ functions: map[string]*decls.FunctionDecl{},
+ macros: []parser.Macro{},
+ Container: containers.DefaultContainer,
+ adapter: registry,
+ provider: registry,
+ features: map[int]bool{},
+ appliedFeatures: map[int]bool{},
+ libraries: map[string]bool{},
+ validators: []ASTValidator{},
+ progOpts: []ProgramOption{},
+ costOptions: []checker.CostOption{},
+ }).configure(opts)
+}
+
+// Check performs type-checking on the input Ast and yields a checked Ast and/or set of Issues.
+// If any `ASTValidators` are configured on the environment, they will be applied after a valid
+// type-check result. If any issues are detected, the validators will provide them on the
+// output Issues object.
+//
+// Either checking or validation has failed if the returned Issues value and its Issues.Err()
+// value are non-nil. Issues should be inspected if they are non-nil, but may not represent a
+// fatal error.
+//
+// It is possible to have both non-nil Ast and Issues values returned from this call; however,
+// the mere presence of an Ast does not imply that it is valid for use.
+func (e *Env) Check(ast *Ast) (*Ast, *Issues) {
+ // Construct the internal checker env, erroring if there is an issue adding the declarations.
+ chk, err := e.initChecker()
+ if err != nil {
+ errs := common.NewErrors(ast.Source())
+ errs.ReportError(common.NoLocation, err.Error())
+ return nil, NewIssuesWithSourceInfo(errs, ast.NativeRep().SourceInfo())
+ }
+
+ checked, errs := checker.Check(ast.NativeRep(), ast.Source(), chk)
+ if len(errs.GetErrors()) > 0 {
+ return nil, NewIssuesWithSourceInfo(errs, ast.NativeRep().SourceInfo())
+ }
+ // Manually create the Ast to ensure that the Ast source information (which may be more
+ // detailed than the information provided by Check) is returned to the caller.
+ ast = &Ast{
+ source: ast.Source(),
+ impl: checked}
+
+ // Avoid creating a validator config if it's not needed.
+ if len(e.validators) == 0 {
+ return ast, nil
+ }
+
+ // Generate a validator configuration from the set of configured validators.
+ vConfig := newValidatorConfig()
+ for _, v := range e.validators {
+ if cv, ok := v.(ASTValidatorConfigurer); ok {
+ cv.Configure(vConfig)
+ }
+ }
+ // Apply additional validators on the type-checked result.
+ iss := NewIssuesWithSourceInfo(errs, ast.NativeRep().SourceInfo())
+ for _, v := range e.validators {
+ v.Validate(e, vConfig, checked, iss)
+ }
+ if iss.Err() != nil {
+ return nil, iss
+ }
+ return ast, nil
+}
+
+// Compile combines the Parse and Check phases of CEL program compilation to produce an Ast and
+// associated issues.
+//
+// If an error is encountered during parsing the Compile step will not continue with the Check
+// phase. If non-error issues are encountered during Parse, they may be combined with any issues
+// discovered during Check.
+//
+// Note, for parse-only uses of CEL use Parse.
+func (e *Env) Compile(txt string) (*Ast, *Issues) {
+ return e.CompileSource(common.NewTextSource(txt))
+}
+
+// CompileSource combines the Parse and Check phases of CEL program compilation to produce an Ast and
+// associated issues.
+//
+// If an error is encountered during parsing the CompileSource step will not continue with the
+// Check phase. If non-error issues are encountered during Parse, they may be combined with any
+// issues discovered during Check.
+//
+// Note, for parse-only uses of CEL use Parse.
+func (e *Env) CompileSource(src Source) (*Ast, *Issues) {
+ ast, iss := e.ParseSource(src)
+ if iss.Err() != nil {
+ return nil, iss
+ }
+ checked, iss2 := e.Check(ast)
+ if iss2.Err() != nil {
+ return nil, iss2
+ }
+ return checked, iss2
+}
+
+// Extend the current environment with additional options to produce a new Env.
+//
+// Note, the extended Env value should not share memory with the original. It is possible, however,
+// that a CustomTypeAdapter or CustomTypeProvider options could provide values which are mutable.
+// To ensure separation of state between extended environments either make sure the TypeAdapter and
+// TypeProvider are immutable, or that their underlying implementations are based on the
+// ref.TypeRegistry which provides a Copy method which will be invoked by this method.
+func (e *Env) Extend(opts ...EnvOption) (*Env, error) {
+ chk, chkErr := e.getCheckerOrError()
+ if chkErr != nil {
+ return nil, chkErr
+ }
+
+ prsrOptsCopy := make([]parser.Option, len(e.prsrOpts))
+ copy(prsrOptsCopy, e.prsrOpts)
+
+ // The type-checker is configured with Declarations. The declarations may either be provided
+ // as options which have not yet been validated, or may come from a previous checker instance
+ // whose types have already been validated.
+ chkOptsCopy := make([]checker.Option, len(e.chkOpts))
+ copy(chkOptsCopy, e.chkOpts)
+
+ // Copy the declarations if needed.
+ if chk != nil {
+ // If the type-checker has already been instantiated, then the e.declarations have been
+ // validated within the chk instance.
+ chkOptsCopy = append(chkOptsCopy, checker.ValidatedDeclarations(chk))
+ }
+ varsCopy := make([]*decls.VariableDecl, len(e.variables))
+ copy(varsCopy, e.variables)
+
+ // Copy macros and program options
+ macsCopy := make([]parser.Macro, len(e.macros))
+ progOptsCopy := make([]ProgramOption, len(e.progOpts))
+ copy(macsCopy, e.macros)
+ copy(progOptsCopy, e.progOpts)
+
+ // Copy the adapter / provider if they appear to be mutable.
+ adapter := e.adapter
+ provider := e.provider
+ adapterReg, isAdapterReg := e.adapter.(*types.Registry)
+ providerReg, isProviderReg := e.provider.(*types.Registry)
+ // In most cases the provider and adapter will be a ref.TypeRegistry;
+ // however, in the rare cases where they are not, they are assumed to
+ // be immutable. Since it is possible to set the TypeProvider separately
+ // from the TypeAdapter, the possible configurations which could use a
+ // TypeRegistry as the base implementation are captured below.
+ if isAdapterReg && isProviderReg {
+ reg := providerReg.Copy()
+ provider = reg
+ // If the adapter and provider are the same object, set the adapter
+ // to the same ref.TypeRegistry as the provider.
+ if adapterReg == providerReg {
+ adapter = reg
+ } else {
+ // Otherwise, make a copy of the adapter.
+ adapter = adapterReg.Copy()
+ }
+ } else if isProviderReg {
+ provider = providerReg.Copy()
+ } else if isAdapterReg {
+ adapter = adapterReg.Copy()
+ }
+
+ featuresCopy := make(map[int]bool, len(e.features))
+ for k, v := range e.features {
+ featuresCopy[k] = v
+ }
+ appliedFeaturesCopy := make(map[int]bool, len(e.appliedFeatures))
+ for k, v := range e.appliedFeatures {
+ appliedFeaturesCopy[k] = v
+ }
+ funcsCopy := make(map[string]*decls.FunctionDecl, len(e.functions))
+ for k, v := range e.functions {
+ funcsCopy[k] = v
+ }
+ libsCopy := make(map[string]bool, len(e.libraries))
+ for k, v := range e.libraries {
+ libsCopy[k] = v
+ }
+ validatorsCopy := make([]ASTValidator, len(e.validators))
+ copy(validatorsCopy, e.validators)
+ costOptsCopy := make([]checker.CostOption, len(e.costOptions))
+ copy(costOptsCopy, e.costOptions)
+
+ ext := &Env{
+ Container: e.Container,
+ variables: varsCopy,
+ functions: funcsCopy,
+ macros: macsCopy,
+ progOpts: progOptsCopy,
+ adapter: adapter,
+ features: featuresCopy,
+ appliedFeatures: appliedFeaturesCopy,
+ libraries: libsCopy,
+ validators: validatorsCopy,
+ provider: provider,
+ chkOpts: chkOptsCopy,
+ prsrOpts: prsrOptsCopy,
+ costOptions: costOptsCopy,
+ }
+ return ext.configure(opts)
+}
+
+// HasFeature checks whether the environment enables the given feature
+// flag, as enumerated in options.go.
+func (e *Env) HasFeature(flag int) bool {
+ enabled, has := e.features[flag]
+ return has && enabled
+}
+
+// HasLibrary returns whether a specific SingletonLibrary has been configured in the environment.
+func (e *Env) HasLibrary(libName string) bool {
+ configured, exists := e.libraries[libName]
+ return exists && configured
+}
+
+// Libraries returns a list of the SingletonLibrary names that have been configured in the environment.
+func (e *Env) Libraries() []string {
+ libraries := make([]string, 0, len(e.libraries))
+ for libName := range e.libraries {
+ libraries = append(libraries, libName)
+ }
+ return libraries
+}
+
+// HasFunction returns whether a specific function has been configured in the environment.
+func (e *Env) HasFunction(functionName string) bool {
+ _, ok := e.functions[functionName]
+ return ok
+}
+
+// Functions returns the map of functions, keyed by function name, that have been configured in the environment.
+func (e *Env) Functions() map[string]*decls.FunctionDecl {
+ return e.functions
+}
+
+// HasValidator returns whether a specific ASTValidator has been configured in the environment.
+func (e *Env) HasValidator(name string) bool {
+ for _, v := range e.validators {
+ if v.Name() == name {
+ return true
+ }
+ }
+ return false
+}
+
+// Parse parses the input expression value `txt` to an Ast and/or a set of Issues.
+//
+// This form of Parse creates a Source value for the input `txt` and forwards to the
+// ParseSource method.
+func (e *Env) Parse(txt string) (*Ast, *Issues) {
+ src := common.NewTextSource(txt)
+ return e.ParseSource(src)
+}
+
+// ParseSource parses the input source to an Ast and/or set of Issues.
+//
+// Parsing has failed if the returned Issues value and its Issues.Err() value are non-nil.
+// Issues should be inspected if they are non-nil, but may not represent a fatal error.
+//
+// It is possible to have both non-nil Ast and Issues values returned from this call; however,
+// the mere presence of an Ast does not imply that it is valid for use.
+func (e *Env) ParseSource(src Source) (*Ast, *Issues) {
+ parsed, errs := e.prsr.Parse(src)
+ if len(errs.GetErrors()) > 0 {
+ return nil, &Issues{errs: errs}
+ }
+ return &Ast{source: src, impl: parsed}, nil
+}
+
+// Program generates an evaluable instance of the Ast within the environment (Env).
+func (e *Env) Program(ast *Ast, opts ...ProgramOption) (Program, error) {
+ return e.PlanProgram(ast.NativeRep(), opts...)
+}
+
+// PlanProgram generates an evaluable instance of the AST in the go-native representation within
+// the environment (Env).
+func (e *Env) PlanProgram(a *celast.AST, opts ...ProgramOption) (Program, error) {
+ optSet := e.progOpts
+ if len(opts) != 0 {
+ mergedOpts := []ProgramOption{}
+ mergedOpts = append(mergedOpts, e.progOpts...)
+ mergedOpts = append(mergedOpts, opts...)
+ optSet = mergedOpts
+ }
+ return newProgram(e, a, optSet)
+}
+
+// CELTypeAdapter returns the `types.Adapter` configured for the environment.
+func (e *Env) CELTypeAdapter() types.Adapter {
+ return e.adapter
+}
+
+// CELTypeProvider returns the `types.Provider` configured for the environment.
+func (e *Env) CELTypeProvider() types.Provider {
+ return e.provider
+}
+
+// TypeAdapter returns the `ref.TypeAdapter` configured for the environment.
+//
+// Deprecated: use CELTypeAdapter()
+func (e *Env) TypeAdapter() ref.TypeAdapter {
+ return e.adapter
+}
+
+// TypeProvider returns the `ref.TypeProvider` configured for the environment.
+//
+// Deprecated: use CELTypeProvider()
+func (e *Env) TypeProvider() ref.TypeProvider {
+ if legacyProvider, ok := e.provider.(ref.TypeProvider); ok {
+ return legacyProvider
+ }
+ return &interopLegacyTypeProvider{Provider: e.provider}
+}
+
+// UnknownVars returns an interpreter.PartialActivation which marks all variables declared in the
+// Env as unknown AttributePattern values.
+//
+// Note, the UnknownVars will behave the same as an interpreter.EmptyActivation unless the
+// PartialAttributes option is provided as a ProgramOption.
+func (e *Env) UnknownVars() interpreter.PartialActivation {
+ act := interpreter.EmptyActivation()
+ part, _ := PartialVars(act, e.computeUnknownVars(act)...)
+ return part
+}
+
+// PartialVars returns an interpreter.PartialActivation where all variables not in the input variable
+// set, but which have been configured in the environment, are marked as unknown.
+//
+// The `vars` value may either be an interpreter.Activation or any valid input to the
+// interpreter.NewActivation call.
+//
+// Note, this is equivalent to calling cel.PartialVars and manually configuring the set of unknown
+// variables. For more advanced use cases of partial state where portions of an object graph, rather
+// than top-level variables, are missing, the PartialVars() method may be a more suitable choice.
+//
+// Note, the PartialVars will behave the same as an interpreter.EmptyActivation unless the
+// PartialAttributes option is provided as a ProgramOption.
+func (e *Env) PartialVars(vars any) (interpreter.PartialActivation, error) {
+ act, err := interpreter.NewActivation(vars)
+ if err != nil {
+ return nil, err
+ }
+ return PartialVars(act, e.computeUnknownVars(act)...)
+}
+
+// ResidualAst takes an Ast and its EvalDetails to produce a new Ast which only contains the
+// attribute references which are unknown.
+//
+// Residual expressions are beneficial in a few scenarios:
+//
+// - Optimizing constant expression evaluations away.
+// - Indexing and pruning expressions based on known input arguments.
+// - Surfacing additional requirements that are needed in order to complete an evaluation.
+// - Sharing the evaluation of an expression across multiple machines/nodes.
+//
+// For example, if an expression targets a 'resource' and 'request' attribute and the possible
+// values for the resource are known, a PartialActivation could mark the 'request' as an unknown
+// interpreter.AttributePattern and the resulting ResidualAst would be reduced to only the parts
+// of the expression that reference the 'request'.
+//
+// Note, the expression ids within the residual AST generated through this method have no
+// correlation to the expression ids of the original AST.
+//
+// See the PartialVars helper for how to construct a PartialActivation.
+//
+// TODO: Consider adding an option to generate a Program.Residual to avoid round-tripping to an
+// Ast format and then Program again.
+func (e *Env) ResidualAst(a *Ast, details *EvalDetails) (*Ast, error) {
+ pruned := interpreter.PruneAst(a.impl.Expr(), a.impl.SourceInfo().MacroCalls(), details.State())
+ newAST := &Ast{source: a.Source(), impl: pruned}
+ expr, err := AstToString(newAST)
+ if err != nil {
+ return nil, err
+ }
+ parsed, iss := e.Parse(expr)
+ if iss != nil && iss.Err() != nil {
+ return nil, iss.Err()
+ }
+ if !a.IsChecked() {
+ return parsed, nil
+ }
+ checked, iss := e.Check(parsed)
+ if iss != nil && iss.Err() != nil {
+ return nil, iss.Err()
+ }
+ return checked, nil
+}
+
+// EstimateCost estimates the cost of a type checked CEL expression using the length estimates of input data and
+// extension functions provided by estimator.
+func (e *Env) EstimateCost(ast *Ast, estimator checker.CostEstimator, opts ...checker.CostOption) (checker.CostEstimate, error) {
+ extendedOpts := make([]checker.CostOption, 0, len(e.costOptions))
+ extendedOpts = append(extendedOpts, opts...)
+ extendedOpts = append(extendedOpts, e.costOptions...)
+ return checker.Cost(ast.impl, estimator, extendedOpts...)
+}
+
+// configure applies a series of EnvOptions to the current environment.
+func (e *Env) configure(opts []EnvOption) (*Env, error) {
+ // Customize the environment using the provided EnvOption values. If an error is
+ // generated at any step, it will be returned as a nil Env with a non-nil error.
+ var err error
+ for _, opt := range opts {
+ e, err = opt(e)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // If the default UTC timezone fix has been enabled, make sure the library is configured
+ e, err = e.maybeApplyFeature(featureDefaultUTCTimeZone, Lib(timeUTCLibrary{}))
+ if err != nil {
+ return nil, err
+ }
+
+ // Configure the parser.
+ prsrOpts := []parser.Option{}
+ prsrOpts = append(prsrOpts, e.prsrOpts...)
+ prsrOpts = append(prsrOpts, parser.Macros(e.macros...))
+
+ if e.HasFeature(featureEnableMacroCallTracking) {
+ prsrOpts = append(prsrOpts, parser.PopulateMacroCalls(true))
+ }
+ if e.HasFeature(featureVariadicLogicalASTs) {
+ prsrOpts = append(prsrOpts, parser.EnableVariadicOperatorASTs(true))
+ }
+ e.prsr, err = parser.NewParser(prsrOpts...)
+ if err != nil {
+ return nil, err
+ }
+
+ // Ensure that the checker init happens eagerly rather than lazily.
+ if e.HasFeature(featureEagerlyValidateDeclarations) {
+ _, err := e.initChecker()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return e, nil
+}
+
+func (e *Env) initChecker() (*checker.Env, error) {
+ e.chkOnce.Do(func() {
+ chkOpts := []checker.Option{}
+ chkOpts = append(chkOpts, e.chkOpts...)
+ chkOpts = append(chkOpts,
+ checker.CrossTypeNumericComparisons(
+ e.HasFeature(featureCrossTypeNumericComparisons)))
+
+ ce, err := checker.NewEnv(e.Container, e.provider, chkOpts...)
+ if err != nil {
+ e.setCheckerOrError(nil, err)
+ return
+ }
+ // Add the statically configured declarations.
+ err = ce.AddIdents(e.variables...)
+ if err != nil {
+ e.setCheckerOrError(nil, err)
+ return
+ }
+ // Add the function declarations which are derived from the FunctionDecl instances.
+ for _, fn := range e.functions {
+ if fn.IsDeclarationDisabled() {
+ continue
+ }
+ err = ce.AddFunctions(fn)
+ if err != nil {
+ e.setCheckerOrError(nil, err)
+ return
+ }
+ }
+ // Record the configured checker (or error) for reuse across checks.
+ e.setCheckerOrError(ce, nil)
+ })
+ return e.getCheckerOrError()
+}
+
+// setCheckerOrError sets the checker.Env or error state in a concurrency-safe manner
+func (e *Env) setCheckerOrError(chk *checker.Env, chkErr error) {
+ e.chkMutex.Lock()
+ e.chk = chk
+ e.chkErr = chkErr
+ e.chkMutex.Unlock()
+}
+
+// getCheckerOrError gets the checker.Env or error state in a concurrency-safe manner
+func (e *Env) getCheckerOrError() (*checker.Env, error) {
+ e.chkMutex.Lock()
+ defer e.chkMutex.Unlock()
+ return e.chk, e.chkErr
+}
+
+// maybeApplyFeature determines whether the feature-guarded option is enabled, and if so applies
+// the feature if it has not already been enabled.
+func (e *Env) maybeApplyFeature(feature int, option EnvOption) (*Env, error) {
+ if !e.HasFeature(feature) {
+ return e, nil
+ }
+ _, applied := e.appliedFeatures[feature]
+ if applied {
+ return e, nil
+ }
+ e, err := option(e)
+ if err != nil {
+ return nil, err
+ }
+ // record that the feature has been applied since it will generate declarations
+ // and functions which will be propagated on Extend() calls and which should only
+ // be registered once.
+ e.appliedFeatures[feature] = true
+ return e, nil
+}
+
+// computeUnknownVars determines a set of missing variables based on the input activation and the
+// environment's configured declaration set.
+func (e *Env) computeUnknownVars(vars interpreter.Activation) []*interpreter.AttributePattern {
+ var unknownPatterns []*interpreter.AttributePattern
+ for _, v := range e.variables {
+ varName := v.Name()
+ if _, found := vars.ResolveName(varName); found {
+ continue
+ }
+ unknownPatterns = append(unknownPatterns, interpreter.NewAttributePattern(varName))
+ }
+ return unknownPatterns
+}
+
+// Error type which references an expression id, a location within source, and a message.
+type Error = common.Error
+
+// Issues defines methods for inspecting the error details of parse and check calls.
+//
+// Note: in the future, non-fatal warnings and notices may be inspectable via the Issues struct.
+type Issues struct {
+ errs *common.Errors
+ info *celast.SourceInfo
+}
+
+// NewIssues returns an Issues struct from a common.Errors object.
+func NewIssues(errs *common.Errors) *Issues {
+ return NewIssuesWithSourceInfo(errs, nil)
+}
+
+// NewIssuesWithSourceInfo returns an Issues struct from a common.Errors object with SourceInfo metadata
+// which can be used with the `ReportErrorAtID` method for additional error reports within the context
+// information that's inferred from an expression id.
+func NewIssuesWithSourceInfo(errs *common.Errors, info *celast.SourceInfo) *Issues {
+ return &Issues{
+ errs: errs,
+ info: info,
+ }
+}
+
+// Err returns an error value if the issues list contains one or more errors.
+func (i *Issues) Err() error {
+ if i == nil {
+ return nil
+ }
+ if len(i.Errors()) > 0 {
+ return errors.New(i.String())
+ }
+ return nil
+}
+
+// Errors returns the collection of errors encountered in more granular detail.
+func (i *Issues) Errors() []*Error {
+ if i == nil {
+ return []*Error{}
+ }
+ return i.errs.GetErrors()
+}
+
+// Append collects the issues from another Issues struct into a new Issues object.
+func (i *Issues) Append(other *Issues) *Issues {
+ if i == nil {
+ return other
+ }
+ if other == nil || i == other {
+ return i
+ }
+ return NewIssuesWithSourceInfo(i.errs.Append(other.errs.GetErrors()), i.info)
+}
+
+// String converts the issues to a suitable display string.
+func (i *Issues) String() string {
+ if i == nil {
+ return ""
+ }
+ return i.errs.ToDisplayString()
+}
+
+// ReportErrorAtID reports an error message with an optional set of formatting arguments.
+//
+// The source metadata for the expression at `id`, if present, is attached to the error report.
+// To ensure that source metadata is attached to error reports, use NewIssuesWithSourceInfo.
+func (i *Issues) ReportErrorAtID(id int64, message string, args ...any) {
+ i.errs.ReportErrorAtID(id, i.info.GetStartLocation(id), message, args...)
+}
+
+// getStdEnv lazy initializes the CEL standard environment.
+func getStdEnv() (*Env, error) {
+ stdEnvInit.Do(func() {
+ stdEnv, stdEnvErr = NewCustomEnv(StdLib(), EagerlyValidateDeclarations(true))
+ })
+ return stdEnv, stdEnvErr
+}
+
+// interopCELTypeProvider layers support for the types.Provider interface on top of a ref.TypeProvider.
+type interopCELTypeProvider struct {
+ ref.TypeProvider
+}
+
+// FindStructType returns a types.Type instance for the given fully-qualified typeName if one exists.
+//
+// This method proxies to the underlying ref.TypeProvider's FindType method and converts the protobuf type
+// into a native type representation. If the conversion fails, the type is listed as not found.
+func (p *interopCELTypeProvider) FindStructType(typeName string) (*types.Type, bool) {
+ if et, found := p.FindType(typeName); found {
+ t, err := types.ExprTypeToType(et)
+ if err != nil {
+ return nil, false
+ }
+ return t, true
+ }
+ return nil, false
+}
+
+// FindStructFieldNames returns an empty set of field names for the interop provider.
+//
+// To inspect the field names, migrate to a `types.Provider` implementation.
+func (p *interopCELTypeProvider) FindStructFieldNames(typeName string) ([]string, bool) {
+ return []string{}, false
+}
+
+// FindStructFieldType returns a types.FieldType instance for the given fully-qualified typeName and field
+// name, if one exists.
+//
+// This method proxies to the underlying ref.TypeProvider's FindFieldType method and converts the protobuf type
+// into a native type representation. If the conversion fails, the type is listed as not found.
+func (p *interopCELTypeProvider) FindStructFieldType(structType, fieldName string) (*types.FieldType, bool) {
+ if ft, found := p.FindFieldType(structType, fieldName); found {
+ t, err := types.ExprTypeToType(ft.Type)
+ if err != nil {
+ return nil, false
+ }
+ return &types.FieldType{
+ Type: t,
+ IsSet: ft.IsSet,
+ GetFrom: ft.GetFrom,
+ }, true
+ }
+ return nil, false
+}
+
+// interopLegacyTypeProvider layers support for the ref.TypeProvider interface on top of a types.Provider.
+type interopLegacyTypeProvider struct {
+ types.Provider
+}
+
+// FindType returns the protobuf Type representation for the input type name if one exists.
+//
+// This method proxies to the underlying types.Provider FindStructType method and converts the types.Type
+// value to a protobuf Type representation.
+//
+// Failure to convert the type will result in the type not being found.
+func (p *interopLegacyTypeProvider) FindType(typeName string) (*exprpb.Type, bool) {
+ if t, found := p.FindStructType(typeName); found {
+ et, err := types.TypeToExprType(t)
+ if err != nil {
+ return nil, false
+ }
+ return et, true
+ }
+ return nil, false
+}
+
+// FindFieldType returns the protobuf-based FieldType representation for the input type name and field,
+// if one exists.
+//
+// This call proxies to the types.Provider FindStructFieldType method and converts the types.FieldType
+// value to a protobuf-based ref.FieldType representation if found.
+//
+// Failure to convert the FieldType will result in the field not being found.
+func (p *interopLegacyTypeProvider) FindFieldType(structType, fieldName string) (*ref.FieldType, bool) {
+ if cft, found := p.FindStructFieldType(structType, fieldName); found {
+ et, err := types.TypeToExprType(cft.Type)
+ if err != nil {
+ return nil, false
+ }
+ return &ref.FieldType{
+ Type: et,
+ IsSet: cft.IsSet,
+ GetFrom: cft.GetFrom,
+ }, true
+ }
+ return nil, false
+}
+
+var (
+ stdEnvInit sync.Once
+ stdEnv *Env
+ stdEnvErr error
+)
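
Env.PartialVars and Env.ResidualAst above combine into a partial-evaluation workflow. A hedged sketch follows (illustrative only, not part of this diff; the variable names "resource" and "request" are assumptions):

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
)

func main() {
	env, err := cel.NewEnv(
		cel.Variable("resource", cel.StringType),
		cel.Variable("request", cel.StringType),
	)
	if err != nil {
		panic(err)
	}
	ast, iss := env.Compile(`resource == "db" && request == "read"`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	// Partial evaluation needs state tracking plus the partial-eval option.
	prg, err := env.Program(ast, cel.EvalOptions(cel.OptTrackState, cel.OptPartialEval))
	if err != nil {
		panic(err)
	}
	// 'request' is absent from the input, so Env.PartialVars marks it unknown.
	vars, err := env.PartialVars(map[string]any{"resource": "db"})
	if err != nil {
		panic(err)
	}
	out, details, err := prg.Eval(vars)
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // an unknown value referencing 'request'
	// The residual AST keeps only the sub-expressions that still depend on unknowns.
	residual, err := env.ResidualAst(ast, details)
	if err != nil {
		panic(err)
	}
	txt, err := cel.AstToString(residual)
	if err != nil {
		panic(err)
	}
	fmt.Println(txt) // request == "read"
}
```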
diff --git a/vendor/github.com/google/cel-go/cel/folding.go b/vendor/github.com/google/cel-go/cel/folding.go
new file mode 100644
index 000000000..d7060896d
--- /dev/null
+++ b/vendor/github.com/google/cel-go/cel/folding.go
@@ -0,0 +1,559 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cel
+
+import (
+ "fmt"
+
+ "github.com/google/cel-go/common/ast"
+ "github.com/google/cel-go/common/operators"
+ "github.com/google/cel-go/common/overloads"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+)
+
+// ConstantFoldingOption defines a functional option for configuring constant folding.
+type ConstantFoldingOption func(opt *constantFoldingOptimizer) (*constantFoldingOptimizer, error)
+
+// MaxConstantFoldIterations limits the number of times literals may be folded during optimization.
+//
+// Defaults to 100 if not set.
+func MaxConstantFoldIterations(limit int) ConstantFoldingOption {
+ return func(opt *constantFoldingOptimizer) (*constantFoldingOptimizer, error) {
+ opt.maxFoldIterations = limit
+ return opt, nil
+ }
+}
+
+// NewConstantFoldingOptimizer creates an optimizer which inlines constant scalar and aggregate
+// literal values within function calls and select statements with their evaluated result.
+func NewConstantFoldingOptimizer(opts ...ConstantFoldingOption) (ASTOptimizer, error) {
+ folder := &constantFoldingOptimizer{
+ maxFoldIterations: defaultMaxConstantFoldIterations,
+ }
+ var err error
+ for _, o := range opts {
+ folder, err = o(folder)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return folder, nil
+}
+
+type constantFoldingOptimizer struct {
+ maxFoldIterations int
+}
+
+// Optimize queries the expression graph for scalar and aggregate literal expressions within call and
+// select statements and then evaluates them and replaces the call site with the literal result.
+//
+// Note: only values which can be represented as literals in CEL syntax are supported.
+func (opt *constantFoldingOptimizer) Optimize(ctx *OptimizerContext, a *ast.AST) *ast.AST {
+ root := ast.NavigateAST(a)
+
+ // Walk the list of foldable expressions and continue to fold until there are no more folds left.
+ // All of the fold candidates returned by the constantExprMatcher should succeed unless there's
+ // a logic bug with the selection of expressions.
+ foldableExprs := ast.MatchDescendants(root, constantExprMatcher)
+ foldCount := 0
+ for len(foldableExprs) != 0 && foldCount < opt.maxFoldIterations {
+ for _, fold := range foldableExprs {
+ // If the expression could be folded because it's a non-strict call, and the
+ // branches are pruned, continue to the next fold.
+ if fold.Kind() == ast.CallKind && maybePruneBranches(ctx, fold) {
+ continue
+ }
+ // Otherwise, assume all context is needed to evaluate the expression.
+ err := tryFold(ctx, a, fold)
+ if err != nil {
+ ctx.ReportErrorAtID(fold.ID(), "constant-folding evaluation failed: %v", err.Error())
+ return a
+ }
+ }
+ foldCount++
+ foldableExprs = ast.MatchDescendants(root, constantExprMatcher)
+ }
+ // Once all of the constants have been folded, try to run through the remaining comprehensions
+ // one last time. In this case, there's no guarantee they'll run, so we only update the
+ // target comprehension node with the literal value if the evaluation succeeds.
+ for _, compre := range ast.MatchDescendants(root, ast.KindMatcher(ast.ComprehensionKind)) {
+ tryFold(ctx, a, compre)
+ }
+
+ // If the output is a list, map, or struct which contains optional entries, then prune it
+ // to make sure that the optionals, if resolved, do not surface in the output literal.
+ pruneOptionalElements(ctx, root)
+
+ // Ensure that all intermediate values in the folded expression can be represented as valid
+ // CEL literals within the AST structure. Use `PostOrderVisit` rather than `MatchDescendants`
+ // to avoid extra allocations during this final pass through the AST.
+ ast.PostOrderVisit(root, ast.NewExprVisitor(func(e ast.Expr) {
+ if e.Kind() != ast.LiteralKind {
+ return
+ }
+ val := e.AsLiteral()
+ adapted, err := adaptLiteral(ctx, val)
+ if err != nil {
+ ctx.ReportErrorAtID(root.ID(), "constant-folding evaluation failed: %v", err.Error())
+ return
+ }
+ ctx.UpdateExpr(e, adapted)
+ }))
+
+ return a
+}
+
+// tryFold attempts to evaluate a sub-expression to a literal.
+//
+// If the evaluation succeeds, the input expr value will be modified to become a literal, otherwise
+// the method will return an error.
+func tryFold(ctx *OptimizerContext, a *ast.AST, expr ast.Expr) error {
+ // Assume all context is needed to evaluate the expression.
+ subAST := &Ast{
+ impl: ast.NewCheckedAST(ast.NewAST(expr, a.SourceInfo()), a.TypeMap(), a.ReferenceMap()),
+ }
+ prg, err := ctx.Program(subAST)
+ if err != nil {
+ return err
+ }
+ out, _, err := prg.Eval(NoVars())
+ if err != nil {
+ return err
+ }
+ // Update the fold expression to be a literal.
+ ctx.UpdateExpr(expr, ctx.NewLiteral(out))
+ return nil
+}
+
+// maybePruneBranches inspects the non-strict call expression to determine whether
+// a branch can be removed. Evaluation will naturally prune logical and / or calls,
+// but conditional will not be pruned cleanly, so this is one small area where the
+// constant folding step reimplements a portion of the evaluator.
+func maybePruneBranches(ctx *OptimizerContext, expr ast.NavigableExpr) bool {
+ call := expr.AsCall()
+ args := call.Args()
+ switch call.FunctionName() {
+ case operators.LogicalAnd, operators.LogicalOr:
+ return maybeShortcircuitLogic(ctx, call.FunctionName(), args, expr)
+ case operators.Conditional:
+ cond := args[0]
+ truthy := args[1]
+ falsy := args[2]
+ if cond.Kind() != ast.LiteralKind {
+ return false
+ }
+ if cond.AsLiteral() == types.True {
+ ctx.UpdateExpr(expr, truthy)
+ } else {
+ ctx.UpdateExpr(expr, falsy)
+ }
+ return true
+ case operators.In:
+ haystack := args[1]
+ if haystack.Kind() == ast.ListKind && haystack.AsList().Size() == 0 {
+ ctx.UpdateExpr(expr, ctx.NewLiteral(types.False))
+ return true
+ }
+ needle := args[0]
+ if needle.Kind() == ast.LiteralKind && haystack.Kind() == ast.ListKind {
+ needleValue := needle.AsLiteral()
+ list := haystack.AsList()
+ for _, e := range list.Elements() {
+ if e.Kind() == ast.LiteralKind && e.AsLiteral().Equal(needleValue) == types.True {
+ ctx.UpdateExpr(expr, ctx.NewLiteral(types.True))
+ return true
+ }
+ }
+ }
+ }
+ return false
+}
+
+func maybeShortcircuitLogic(ctx *OptimizerContext, function string, args []ast.Expr, expr ast.NavigableExpr) bool {
+ shortcircuit := types.False
+ skip := types.True
+ if function == operators.LogicalOr {
+ shortcircuit = types.True
+ skip = types.False
+ }
+ newArgs := []ast.Expr{}
+ for _, arg := range args {
+ if arg.Kind() != ast.LiteralKind {
+ newArgs = append(newArgs, arg)
+ continue
+ }
+ if arg.AsLiteral() == skip {
+ continue
+ }
+ if arg.AsLiteral() == shortcircuit {
+ ctx.UpdateExpr(expr, arg)
+ return true
+ }
+ }
+ if len(newArgs) == 0 {
+ newArgs = append(newArgs, args[0])
+ ctx.UpdateExpr(expr, newArgs[0])
+ return true
+ }
+ if len(newArgs) == 1 {
+ ctx.UpdateExpr(expr, newArgs[0])
+ return true
+ }
+ ctx.UpdateExpr(expr, ctx.NewCall(function, newArgs...))
+ return true
+}
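+
+// By way of example, the short-circuiting above performs the following folds
+// when one argument is a literal (the non-literal x is preserved):
+//
+//	true && x  => x      // the literal matches `skip` and is dropped
+//	false && x => false  // the literal matches `shortcircuit`
+//	true || x  => true
+//	false || x => x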
+
+// pruneOptionalElements works from the bottom up to resolve optional elements within
+// aggregate literals.
+//
+// Note, many aggregate literals will be resolved as arguments to functions or select
+// statements, so this method exists to handle the case where the literal could not be
+// fully resolved or exists outside of a call, select, or comprehension context.
+func pruneOptionalElements(ctx *OptimizerContext, root ast.NavigableExpr) {
+ aggregateLiterals := ast.MatchDescendants(root, aggregateLiteralMatcher)
+ for _, lit := range aggregateLiterals {
+ switch lit.Kind() {
+ case ast.ListKind:
+ pruneOptionalListElements(ctx, lit)
+ case ast.MapKind:
+ pruneOptionalMapEntries(ctx, lit)
+ case ast.StructKind:
+ pruneOptionalStructFields(ctx, lit)
+ }
+ }
+}
+
+func pruneOptionalListElements(ctx *OptimizerContext, e ast.Expr) {
+ l := e.AsList()
+ elems := l.Elements()
+ optIndices := l.OptionalIndices()
+ if len(optIndices) == 0 {
+ return
+ }
+ updatedElems := []ast.Expr{}
+ updatedIndices := []int32{}
+ newOptIndex := -1
+ for _, e := range elems {
+ newOptIndex++
+ if !l.IsOptional(int32(newOptIndex)) {
+ updatedElems = append(updatedElems, e)
+ continue
+ }
+ if e.Kind() != ast.LiteralKind {
+ updatedElems = append(updatedElems, e)
+ updatedIndices = append(updatedIndices, int32(newOptIndex))
+ continue
+ }
+ optElemVal, ok := e.AsLiteral().(*types.Optional)
+ if !ok {
+ updatedElems = append(updatedElems, e)
+ updatedIndices = append(updatedIndices, int32(newOptIndex))
+ continue
+ }
+ if !optElemVal.HasValue() {
+ newOptIndex-- // Skipping causes the list to get smaller.
+ continue
+ }
+ ctx.UpdateExpr(e, ctx.NewLiteral(optElemVal.GetValue()))
+ updatedElems = append(updatedElems, e)
+ }
+ ctx.UpdateExpr(e, ctx.NewList(updatedElems, updatedIndices))
+}
+
+func pruneOptionalMapEntries(ctx *OptimizerContext, e ast.Expr) {
+ m := e.AsMap()
+ entries := m.Entries()
+ updatedEntries := []ast.EntryExpr{}
+ modified := false
+ for _, e := range entries {
+ entry := e.AsMapEntry()
+ key := entry.Key()
+ val := entry.Value()
+ // If the entry is not optional, or the value-side of the optional hasn't
+ // been resolved to a literal, then preserve the entry as-is.
+ if !entry.IsOptional() || val.Kind() != ast.LiteralKind {
+ updatedEntries = append(updatedEntries, e)
+ continue
+ }
+ optElemVal, ok := val.AsLiteral().(*types.Optional)
+ if !ok {
+ updatedEntries = append(updatedEntries, e)
+ continue
+ }
+ // When the key is not a literal, but the value is, then it needs to be
+ // restored to an optional value.
+ if key.Kind() != ast.LiteralKind {
+ undoOptVal, err := adaptLiteral(ctx, optElemVal)
+ if err != nil {
+ ctx.ReportErrorAtID(val.ID(), "invalid map value literal %v: %v", optElemVal, err)
+ }
+ ctx.UpdateExpr(val, undoOptVal)
+ updatedEntries = append(updatedEntries, e)
+ continue
+ }
+ modified = true
+ if !optElemVal.HasValue() {
+ continue
+ }
+ ctx.UpdateExpr(val, ctx.NewLiteral(optElemVal.GetValue()))
+ updatedEntry := ctx.NewMapEntry(key, val, false)
+ updatedEntries = append(updatedEntries, updatedEntry)
+ }
+ if modified {
+ ctx.UpdateExpr(e, ctx.NewMap(updatedEntries))
+ }
+}
+
+func pruneOptionalStructFields(ctx *OptimizerContext, e ast.Expr) {
+ s := e.AsStruct()
+ fields := s.Fields()
+ updatedFields := []ast.EntryExpr{}
+ modified := false
+ for _, f := range fields {
+ field := f.AsStructField()
+ val := field.Value()
+ if !field.IsOptional() || val.Kind() != ast.LiteralKind {
+ updatedFields = append(updatedFields, f)
+ continue
+ }
+ optElemVal, ok := val.AsLiteral().(*types.Optional)
+ if !ok {
+ updatedFields = append(updatedFields, f)
+ continue
+ }
+ modified = true
+ if !optElemVal.HasValue() {
+ continue
+ }
+ ctx.UpdateExpr(val, ctx.NewLiteral(optElemVal.GetValue()))
+ updatedField := ctx.NewStructField(field.Name(), val, false)
+ updatedFields = append(updatedFields, updatedField)
+ }
+ if modified {
+ ctx.UpdateExpr(e, ctx.NewStruct(s.TypeName(), updatedFields))
+ }
+}
+
+// adaptLiteral converts a runtime CEL value to its equivalent literal expression.
+//
+// For strongly typed values, the type-provider will be used to reconstruct the fields
+// which are present in the literal and their equivalent initialization values.
+func adaptLiteral(ctx *OptimizerContext, val ref.Val) (ast.Expr, error) {
+ switch t := val.Type().(type) {
+ case *types.Type:
+ switch t {
+ case types.BoolType, types.BytesType, types.DoubleType, types.IntType,
+ types.NullType, types.StringType, types.UintType:
+ return ctx.NewLiteral(val), nil
+ case types.DurationType:
+ return ctx.NewCall(
+ overloads.TypeConvertDuration,
+ ctx.NewLiteral(val.ConvertToType(types.StringType)),
+ ), nil
+ case types.TimestampType:
+ return ctx.NewCall(
+ overloads.TypeConvertTimestamp,
+ ctx.NewLiteral(val.ConvertToType(types.StringType)),
+ ), nil
+ case types.OptionalType:
+ opt := val.(*types.Optional)
+ if !opt.HasValue() {
+ return ctx.NewCall("optional.none"), nil
+ }
+ target, err := adaptLiteral(ctx, opt.GetValue())
+ if err != nil {
+ return nil, err
+ }
+ return ctx.NewCall("optional.of", target), nil
+ case types.TypeType:
+ return ctx.NewIdent(val.(*types.Type).TypeName()), nil
+ case types.ListType:
+ l, ok := val.(traits.Lister)
+ if !ok {
+ return nil, fmt.Errorf("failed to adapt %v to literal", val)
+ }
+ elems := make([]ast.Expr, l.Size().(types.Int))
+ idx := 0
+ it := l.Iterator()
+ for it.HasNext() == types.True {
+ elemVal := it.Next()
+ elemExpr, err := adaptLiteral(ctx, elemVal)
+ if err != nil {
+ return nil, err
+ }
+ elems[idx] = elemExpr
+ idx++
+ }
+ return ctx.NewList(elems, []int32{}), nil
+ case types.MapType:
+ m, ok := val.(traits.Mapper)
+ if !ok {
+ return nil, fmt.Errorf("failed to adapt %v to literal", val)
+ }
+ entries := make([]ast.EntryExpr, m.Size().(types.Int))
+ idx := 0
+ it := m.Iterator()
+ for it.HasNext() == types.True {
+ keyVal := it.Next()
+ keyExpr, err := adaptLiteral(ctx, keyVal)
+ if err != nil {
+ return nil, err
+ }
+ valVal := m.Get(keyVal)
+ valExpr, err := adaptLiteral(ctx, valVal)
+ if err != nil {
+ return nil, err
+ }
+ entries[idx] = ctx.NewMapEntry(keyExpr, valExpr, false)
+ idx++
+ }
+ return ctx.NewMap(entries), nil
+ default:
+ provider := ctx.CELTypeProvider()
+ fields, found := provider.FindStructFieldNames(t.TypeName())
+ if !found {
+ return nil, fmt.Errorf("failed to adapt %v to literal", val)
+ }
+ tester := val.(traits.FieldTester)
+ indexer := val.(traits.Indexer)
+ fieldInits := []ast.EntryExpr{}
+ for _, f := range fields {
+ field := types.String(f)
+ if tester.IsSet(field) != types.True {
+ continue
+ }
+ fieldVal := indexer.Get(field)
+ fieldExpr, err := adaptLiteral(ctx, fieldVal)
+ if err != nil {
+ return nil, err
+ }
+ fieldInits = append(fieldInits, ctx.NewStructField(f, fieldExpr, false))
+ }
+ return ctx.NewStruct(t.TypeName(), fieldInits), nil
+ }
+ }
+ return nil, fmt.Errorf("failed to adapt %v to literal", val)
+}
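+
+// Illustrative adaptations performed above for values without a direct literal
+// form (a sketch; the concrete argument values are hypothetical):
+//
+//	duration value  => duration("300s")  // string form wrapped in a conversion call
+//	timestamp value => timestamp("2023-01-01T00:00:00Z")
+//	optional value  => optional.of(<adapted value>) or optional.none()
+//	type value      => the type name as an identifier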
+
+// constantExprMatcher matches calls, select statements, and comprehensions whose arguments
+// are all constant scalar or aggregate literal values.
+//
+// Only comprehensions which are not nested are included as possible constant folds, and only
+// if all variables referenced in the comprehension stack are iteration or accumulation
+// variables.
+func constantExprMatcher(e ast.NavigableExpr) bool {
+ switch e.Kind() {
+ case ast.CallKind:
+ return constantCallMatcher(e)
+ case ast.SelectKind:
+ sel := e.AsSelect() // guaranteed to be a navigable value
+ return constantMatcher(sel.Operand().(ast.NavigableExpr))
+ case ast.ComprehensionKind:
+ if isNestedComprehension(e) {
+ return false
+ }
+ vars := map[string]bool{}
+ constantExprs := true
+ visitor := ast.NewExprVisitor(func(e ast.Expr) {
+ if e.Kind() == ast.ComprehensionKind {
+ nested := e.AsComprehension()
+ vars[nested.AccuVar()] = true
+ vars[nested.IterVar()] = true
+ }
+ if e.Kind() == ast.IdentKind && !vars[e.AsIdent()] {
+ constantExprs = false
+ }
+ })
+ ast.PreOrderVisit(e, visitor)
+ return constantExprs
+ default:
+ return false
+ }
+}
+
+// constantCallMatcher identifies strict and non-strict calls which can be folded.
+func constantCallMatcher(e ast.NavigableExpr) bool {
+ call := e.AsCall()
+ children := e.Children()
+ fnName := call.FunctionName()
+ if fnName == operators.LogicalAnd {
+ for _, child := range children {
+ if child.Kind() == ast.LiteralKind {
+ return true
+ }
+ }
+ }
+ if fnName == operators.LogicalOr {
+ for _, child := range children {
+ if child.Kind() == ast.LiteralKind {
+ return true
+ }
+ }
+ }
+ if fnName == operators.Conditional {
+ cond := children[0]
+ if cond.Kind() == ast.LiteralKind && cond.AsLiteral().Type() == types.BoolType {
+ return true
+ }
+ }
+ if fnName == operators.In {
+ haystack := children[1]
+ if haystack.Kind() == ast.ListKind && haystack.AsList().Size() == 0 {
+ return true
+ }
+ needle := children[0]
+ if needle.Kind() == ast.LiteralKind && haystack.Kind() == ast.ListKind {
+ needleValue := needle.AsLiteral()
+ list := haystack.AsList()
+ for _, e := range list.Elements() {
+ if e.Kind() == ast.LiteralKind && e.AsLiteral().Equal(needleValue) == types.True {
+ return true
+ }
+ }
+ }
+ }
+ // convert all other calls with constant arguments
+ for _, child := range children {
+ if !constantMatcher(child) {
+ return false
+ }
+ }
+ return true
+}
+
+func isNestedComprehension(e ast.NavigableExpr) bool {
+ parent, found := e.Parent()
+ for found {
+ if parent.Kind() == ast.ComprehensionKind {
+ return true
+ }
+ parent, found = parent.Parent()
+ }
+ return false
+}
+
+func aggregateLiteralMatcher(e ast.NavigableExpr) bool {
+ return e.Kind() == ast.ListKind || e.Kind() == ast.MapKind || e.Kind() == ast.StructKind
+}
+
+var (
+ constantMatcher = ast.ConstantValueMatcher()
+)
+
+const (
+ defaultMaxConstantFoldIterations = 100
+)
diff --git a/vendor/github.com/google/cel-go/cel/inlining.go b/vendor/github.com/google/cel-go/cel/inlining.go
new file mode 100644
index 000000000..78d5bea65
--- /dev/null
+++ b/vendor/github.com/google/cel-go/cel/inlining.go
@@ -0,0 +1,228 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cel
+
+import (
+ "github.com/google/cel-go/common/ast"
+ "github.com/google/cel-go/common/containers"
+ "github.com/google/cel-go/common/operators"
+ "github.com/google/cel-go/common/overloads"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/traits"
+)
+
+// InlineVariable holds a variable name to be matched and an AST representing
+// the expression graph which should be used to replace it.
+type InlineVariable struct {
+ name string
+ alias string
+ def *ast.AST
+}
+
+// Name returns the qualified variable or field selection to replace.
+func (v *InlineVariable) Name() string {
+ return v.name
+}
+
+// Alias returns the alias to use when performing cel.bind() calls during inlining.
+func (v *InlineVariable) Alias() string {
+ return v.alias
+}
+
+// Expr returns the inlined expression value.
+func (v *InlineVariable) Expr() ast.Expr {
+ return v.def.Expr()
+}
+
+// Type indicates the inlined expression type.
+func (v *InlineVariable) Type() *Type {
+ return v.def.GetType(v.def.Expr().ID())
+}
+
+// NewInlineVariable declares a variable name to be replaced by a checked expression.
+func NewInlineVariable(name string, definition *Ast) *InlineVariable {
+ return NewInlineVariableWithAlias(name, name, definition)
+}
+
+// NewInlineVariableWithAlias declares a variable name to be replaced by a checked expression.
+// If the variable occurs more than once, the provided alias will be used to replace the expressions
+// where the variable name occurs.
+func NewInlineVariableWithAlias(name, alias string, definition *Ast) *InlineVariable {
+ return &InlineVariable{name: name, alias: alias, def: definition.impl}
+}
+
+// NewInliningOptimizer creates an optimizer which replaces variables with expression definitions.
+//
+// If a variable occurs once, it is replaced by the inline definition. If the variable
+// occurs more than once, its occurrences are replaced by a cel.bind() call.
+func NewInliningOptimizer(inlineVars ...*InlineVariable) ASTOptimizer {
+ return &inliningOptimizer{variables: inlineVars}
+}
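+
+// Illustrative usage (a sketch; NewStaticOptimizer is the assumed driver declared
+// elsewhere in this package, and the expressions are hypothetical):
+//
+//	claimsDef, _ := env.Compile(`request.auth.claims`)
+//	inliner := cel.NewInliningOptimizer(cel.NewInlineVariable("claims", claimsDef))
+//	inlined, _ := cel.NewStaticOptimizer(inliner).Optimize(env, checkedAST)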
+
+type inliningOptimizer struct {
+ variables []*InlineVariable
+}
+
+func (opt *inliningOptimizer) Optimize(ctx *OptimizerContext, a *ast.AST) *ast.AST {
+ root := ast.NavigateAST(a)
+ for _, inlineVar := range opt.variables {
+ matches := ast.MatchDescendants(root, opt.matchVariable(inlineVar.Name()))
+ // Skip cases where the variable isn't in the expression graph
+ if len(matches) == 0 {
+ continue
+ }
+
+ // For a single match, do a direct replacement of the expression sub-graph.
+ if len(matches) == 1 || !isBindable(matches, inlineVar.Expr(), inlineVar.Type()) {
+ for _, match := range matches {
+ // Copy the inlined AST expr and source info.
+ copyExpr := ctx.CopyASTAndMetadata(inlineVar.def)
+ opt.inlineExpr(ctx, match, copyExpr, inlineVar.Type())
+ }
+ continue
+ }
+
+ // For multiple matches, find the least common ancestor (lca) and insert the
+ // variable as a cel.bind() macro.
+ var lca ast.NavigableExpr = root
+ lcaAncestorCount := 0
+ ancestors := map[int64]int{}
+ for _, match := range matches {
+ // Update the identifier matches with the provided alias.
+ parent, found := match, true
+ for found {
+ ancestorCount, hasAncestor := ancestors[parent.ID()]
+ if !hasAncestor {
+ ancestors[parent.ID()] = 1
+ parent, found = parent.Parent()
+ continue
+ }
+ if lcaAncestorCount < ancestorCount || (lcaAncestorCount == ancestorCount && lca.Depth() < parent.Depth()) {
+ lca = parent
+ lcaAncestorCount = ancestorCount
+ }
+ ancestors[parent.ID()] = ancestorCount + 1
+ parent, found = parent.Parent()
+ }
+ aliasExpr := ctx.NewIdent(inlineVar.Alias())
+ opt.inlineExpr(ctx, match, aliasExpr, inlineVar.Type())
+ }
+
+ // Copy the inlined AST expr and source info.
+ copyExpr := ctx.CopyASTAndMetadata(inlineVar.def)
+ // Update the least common ancestor by inserting a cel.bind() call to the alias.
+ inlined, bindMacro := ctx.NewBindMacro(lca.ID(), inlineVar.Alias(), copyExpr, lca)
+ opt.inlineExpr(ctx, lca, inlined, inlineVar.Type())
+ ctx.SetMacroCall(lca.ID(), bindMacro)
+ }
+ return a
+}
+
+// inlineExpr replaces the current expression with the inlined one, unless the inlining occurs
+// within a presence test, e.g. has(a.b.c) where a.b.c is the inline target; in that case an
+// attempt is made to determine whether the inlined value can be presence or existence tested.
+func (opt *inliningOptimizer) inlineExpr(ctx *OptimizerContext, prev ast.NavigableExpr, inlined ast.Expr, inlinedType *Type) {
+ switch prev.Kind() {
+ case ast.SelectKind:
+ sel := prev.AsSelect()
+ if !sel.IsTestOnly() {
+ ctx.UpdateExpr(prev, inlined)
+ return
+ }
+ opt.rewritePresenceExpr(ctx, prev, inlined, inlinedType)
+ default:
+ ctx.UpdateExpr(prev, inlined)
+ }
+}
+
+// rewritePresenceExpr converts the inlined expression, when it occurs within a has() macro, to a
+// type-safe expression appropriate for the inlined type, if possible.
+//
+// If the rewrite is not possible an error is reported at the inline expression site.
+func (opt *inliningOptimizer) rewritePresenceExpr(ctx *OptimizerContext, prev, inlined ast.Expr, inlinedType *Type) {
+ // If the input inlined expression is not a select expression it won't work with the has()
+ // macro. Attempt to rewrite the presence test in terms of the typed input, otherwise error.
+ if inlined.Kind() == ast.SelectKind {
+ presenceTest, hasMacro := ctx.NewHasMacro(prev.ID(), inlined)
+ ctx.UpdateExpr(prev, presenceTest)
+ ctx.SetMacroCall(prev.ID(), hasMacro)
+ return
+ }
+
+ ctx.ClearMacroCall(prev.ID())
+ if inlinedType.IsAssignableType(NullType) {
+ ctx.UpdateExpr(prev,
+ ctx.NewCall(operators.NotEquals,
+ inlined,
+ ctx.NewLiteral(types.NullValue),
+ ))
+ return
+ }
+ if inlinedType.HasTrait(traits.SizerType) {
+ ctx.UpdateExpr(prev,
+ ctx.NewCall(operators.NotEquals,
+ ctx.NewMemberCall(overloads.Size, inlined),
+ ctx.NewLiteral(types.IntZero),
+ ))
+ return
+ }
+ ctx.ReportErrorAtID(prev.ID(), "unable to inline expression type %v into presence test", inlinedType)
+}
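+
+// For example (illustrative): inlining a map-typed, non-select definition `m` into
+// `has(msg.field)` yields `size(m) != 0`, while a null-assignable definition yields
+// `m != null` instead.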
+
+// isBindable indicates whether the inlined type can be used within a cel.bind() if the expression
+// being replaced occurs within a presence test. Value types with a size() method or field selection
+// support can be bound.
+//
+// In future iterations, support may also be added for indexer types which can be rewritten as an `in`
+// expression; however, this would imply a rewrite of the inlined expression that may not be necessary
+// in most cases.
+func isBindable(matches []ast.NavigableExpr, inlined ast.Expr, inlinedType *Type) bool {
+ if inlinedType.IsAssignableType(NullType) ||
+ inlinedType.HasTrait(traits.SizerType) {
+ return true
+ }
+ for _, m := range matches {
+ if m.Kind() != ast.SelectKind {
+ continue
+ }
+ sel := m.AsSelect()
+ if sel.IsTestOnly() {
+ return false
+ }
+ }
+ return true
+}
+
+// matchVariable matches simple identifiers, select expressions, and presence test expressions
+// which match the (potentially) qualified variable name provided as input.
+//
+// Note, this function does not support inlining against select expressions which include optional
+// field selection. This may be a future refinement.
+func (opt *inliningOptimizer) matchVariable(varName string) ast.ExprMatcher {
+ return func(e ast.NavigableExpr) bool {
+ if e.Kind() == ast.IdentKind && e.AsIdent() == varName {
+ return true
+ }
+ if e.Kind() == ast.SelectKind {
+ sel := e.AsSelect()
+ // While the `ToQualifiedName` call could take the select directly, this
+ // would skip presence tests from possible matches, which we would like
+ // to include.
+ qualName, found := containers.ToQualifiedName(sel.Operand())
+ return found && qualName+"."+sel.FieldName() == varName
+ }
+ return false
+ }
+}
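+
+// For example (illustrative), matchVariable("a.b") matches the identifier `a.b`,
+// the select expression `a.b` built from operand `a` and field `b`, and the operand
+// of the presence test `has(a.b)`.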
diff --git a/vendor/github.com/google/cel-go/cel/io.go b/vendor/github.com/google/cel-go/cel/io.go
new file mode 100644
index 000000000..7d08d1c81
--- /dev/null
+++ b/vendor/github.com/google/cel-go/cel/io.go
@@ -0,0 +1,288 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cel
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+
+ "google.golang.org/protobuf/proto"
+
+ "github.com/google/cel-go/common"
+ "github.com/google/cel-go/common/ast"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+ "github.com/google/cel-go/parser"
+
+ celpb "cel.dev/expr"
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+ anypb "google.golang.org/protobuf/types/known/anypb"
+)
+
+// CheckedExprToAst converts a checked expression proto message to an Ast.
+func CheckedExprToAst(checkedExpr *exprpb.CheckedExpr) *Ast {
+ checked, _ := CheckedExprToAstWithSource(checkedExpr, nil)
+ return checked
+}
+
+// CheckedExprToAstWithSource converts a checked expression proto message to an Ast,
+// using the provided Source as the textual contents.
+//
+// In general the source is not necessary unless the AST has been modified between the
+// `Parse` and `Check` calls as an `Ast` created from the `Parse` step will carry the source
+// through future calls.
+//
+// Prefer CheckedExprToAst if loading expressions from storage.
+func CheckedExprToAstWithSource(checkedExpr *exprpb.CheckedExpr, src Source) (*Ast, error) {
+ checked, err := ast.ToAST(checkedExpr)
+ if err != nil {
+ return nil, err
+ }
+ return &Ast{source: src, impl: checked}, nil
+}
+
+// AstToCheckedExpr converts an Ast to a protobuf CheckedExpr value.
+//
+// If the Ast.IsChecked() returns false, this conversion method will return an error.
+func AstToCheckedExpr(a *Ast) (*exprpb.CheckedExpr, error) {
+ if !a.IsChecked() {
+ return nil, fmt.Errorf("cannot convert unchecked ast")
+ }
+ return ast.ToProto(a.impl)
+}
+
+// ParsedExprToAst converts a parsed expression proto message to an Ast.
+func ParsedExprToAst(parsedExpr *exprpb.ParsedExpr) *Ast {
+ return ParsedExprToAstWithSource(parsedExpr, nil)
+}
+
+// ParsedExprToAstWithSource converts a parsed expression proto message to an Ast,
+// using the provided Source as the textual contents.
+//
+// In general you only need this if you need to recheck a previously checked
+// expression, or if you need to separately check a subset of an expression.
+//
+// Prefer ParsedExprToAst if loading expressions from storage.
+func ParsedExprToAstWithSource(parsedExpr *exprpb.ParsedExpr, src Source) *Ast {
+ info, _ := ast.ProtoToSourceInfo(parsedExpr.GetSourceInfo())
+ if src == nil {
+ src = common.NewInfoSource(parsedExpr.GetSourceInfo())
+ }
+ e, _ := ast.ProtoToExpr(parsedExpr.GetExpr())
+ return &Ast{source: src, impl: ast.NewAST(e, info)}
+}
+
+// AstToParsedExpr converts an Ast to a protobuf ParsedExpr value.
+func AstToParsedExpr(a *Ast) (*exprpb.ParsedExpr, error) {
+ return &exprpb.ParsedExpr{
+ Expr: a.Expr(),
+ SourceInfo: a.SourceInfo(),
+ }, nil
+}
+
+// AstToString converts an Ast back to a string if possible.
+//
+// Note, the conversion may not be an exact replica of the original expression, but will produce
+// a string that is semantically equivalent and whose textual representation is stable.
+func AstToString(a *Ast) (string, error) {
+ return parser.Unparse(a.impl.Expr(), a.impl.SourceInfo())
+}
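+
+// By way of example, the conversions above compose into a storage round trip
+// (a sketch; error and issue handling elided):
+//
+//	checked, _ := env.Compile(`a + b`)
+//	pb, _ := cel.AstToCheckedExpr(checked) // serialize for storage
+//	restored := cel.CheckedExprToAst(pb)   // rehydrate later
+//	str, _ := cel.AstToString(restored)    // "a + b"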
+
+// RefValueToValue converts between ref.Val and api.expr.Value.
+// The result Value is the serialized proto form. The ref.Val must not be an error or an unknown value.
+func RefValueToValue(res ref.Val) (*exprpb.Value, error) {
+ return ValueAsAlphaProto(res)
+}
+
+// ValueAsAlphaProto converts a ref.Val to a google.api.expr.v1alpha1.Value proto.
+func ValueAsAlphaProto(res ref.Val) (*exprpb.Value, error) {
+ canonical, err := ValueAsProto(res)
+ if err != nil {
+ return nil, err
+ }
+ alpha := &exprpb.Value{}
+ err = convertProto(canonical, alpha)
+ return alpha, err
+}
+
+// ValueAsProto converts a ref.Val to a canonical cel.dev/expr Value proto.
+func ValueAsProto(res ref.Val) (*celpb.Value, error) {
+ switch res.Type() {
+ case types.BoolType:
+ return &celpb.Value{
+ Kind: &celpb.Value_BoolValue{BoolValue: res.Value().(bool)}}, nil
+ case types.BytesType:
+ return &celpb.Value{
+ Kind: &celpb.Value_BytesValue{BytesValue: res.Value().([]byte)}}, nil
+ case types.DoubleType:
+ return &celpb.Value{
+ Kind: &celpb.Value_DoubleValue{DoubleValue: res.Value().(float64)}}, nil
+ case types.IntType:
+ return &celpb.Value{
+ Kind: &celpb.Value_Int64Value{Int64Value: res.Value().(int64)}}, nil
+ case types.ListType:
+ l := res.(traits.Lister)
+ sz := l.Size().(types.Int)
+ elts := make([]*celpb.Value, 0, int64(sz))
+ for i := types.Int(0); i < sz; i++ {
+ v, err := ValueAsProto(l.Get(i))
+ if err != nil {
+ return nil, err
+ }
+ elts = append(elts, v)
+ }
+ return &celpb.Value{
+ Kind: &celpb.Value_ListValue{
+ ListValue: &celpb.ListValue{Values: elts}}}, nil
+ case types.MapType:
+ mapper := res.(traits.Mapper)
+ sz := mapper.Size().(types.Int)
+ entries := make([]*celpb.MapValue_Entry, 0, int64(sz))
+ for it := mapper.Iterator(); it.HasNext().(types.Bool); {
+ k := it.Next()
+ v := mapper.Get(k)
+ kv, err := ValueAsProto(k)
+ if err != nil {
+ return nil, err
+ }
+ vv, err := ValueAsProto(v)
+ if err != nil {
+ return nil, err
+ }
+ entries = append(entries, &celpb.MapValue_Entry{Key: kv, Value: vv})
+ }
+ return &celpb.Value{
+ Kind: &celpb.Value_MapValue{
+ MapValue: &celpb.MapValue{Entries: entries}}}, nil
+ case types.NullType:
+ return &celpb.Value{
+ Kind: &celpb.Value_NullValue{}}, nil
+ case types.StringType:
+ return &celpb.Value{
+ Kind: &celpb.Value_StringValue{StringValue: res.Value().(string)}}, nil
+ case types.TypeType:
+ typeName := res.(ref.Type).TypeName()
+ return &celpb.Value{Kind: &celpb.Value_TypeValue{TypeValue: typeName}}, nil
+ case types.UintType:
+ return &celpb.Value{
+ Kind: &celpb.Value_Uint64Value{Uint64Value: res.Value().(uint64)}}, nil
+ default:
+ any, err := res.ConvertToNative(anyPbType)
+ if err != nil {
+ return nil, err
+ }
+ return &celpb.Value{
+ Kind: &celpb.Value_ObjectValue{ObjectValue: any.(*anypb.Any)}}, nil
+ }
+}
+
+var (
+ typeNameToTypeValue = map[string]ref.Val{
+ "bool": types.BoolType,
+ "bytes": types.BytesType,
+ "double": types.DoubleType,
+ "null_type": types.NullType,
+ "int": types.IntType,
+ "list": types.ListType,
+ "map": types.MapType,
+ "string": types.StringType,
+ "type": types.TypeType,
+ "uint": types.UintType,
+ }
+
+ anyPbType = reflect.TypeOf(&anypb.Any{})
+)
+
+// ValueToRefValue converts between exprpb.Value and ref.Val.
+func ValueToRefValue(adapter types.Adapter, v *exprpb.Value) (ref.Val, error) {
+ return AlphaProtoAsValue(adapter, v)
+}
+
+// AlphaProtoAsValue converts a google.api.expr.v1alpha1.Value proto to a ref.Val.
+func AlphaProtoAsValue(adapter types.Adapter, v *exprpb.Value) (ref.Val, error) {
+ canonical := &celpb.Value{}
+ if err := convertProto(v, canonical); err != nil {
+ return nil, err
+ }
+ return ProtoAsValue(adapter, canonical)
+}
+
+// ProtoAsValue converts a canonical cel.dev/expr Value proto to a ref.Val.
+func ProtoAsValue(adapter types.Adapter, v *celpb.Value) (ref.Val, error) {
+ switch v.Kind.(type) {
+ case *celpb.Value_NullValue:
+ return types.NullValue, nil
+ case *celpb.Value_BoolValue:
+ return types.Bool(v.GetBoolValue()), nil
+ case *celpb.Value_Int64Value:
+ return types.Int(v.GetInt64Value()), nil
+ case *celpb.Value_Uint64Value:
+ return types.Uint(v.GetUint64Value()), nil
+ case *celpb.Value_DoubleValue:
+ return types.Double(v.GetDoubleValue()), nil
+ case *celpb.Value_StringValue:
+ return types.String(v.GetStringValue()), nil
+ case *celpb.Value_BytesValue:
+ return types.Bytes(v.GetBytesValue()), nil
+ case *celpb.Value_ObjectValue:
+ any := v.GetObjectValue()
+ msg, err := anypb.UnmarshalNew(any, proto.UnmarshalOptions{DiscardUnknown: true})
+ if err != nil {
+ return nil, err
+ }
+ return adapter.NativeToValue(msg), nil
+ case *celpb.Value_MapValue:
+ m := v.GetMapValue()
+ entries := make(map[ref.Val]ref.Val)
+ for _, entry := range m.Entries {
+ key, err := ProtoAsValue(adapter, entry.Key)
+ if err != nil {
+ return nil, err
+ }
+ pb, err := ProtoAsValue(adapter, entry.Value)
+ if err != nil {
+ return nil, err
+ }
+ entries[key] = pb
+ }
+ return adapter.NativeToValue(entries), nil
+ case *celpb.Value_ListValue:
+ l := v.GetListValue()
+ elts := make([]ref.Val, len(l.Values))
+ for i, e := range l.Values {
+ rv, err := ProtoAsValue(adapter, e)
+ if err != nil {
+ return nil, err
+ }
+ elts[i] = rv
+ }
+ return adapter.NativeToValue(elts), nil
+ case *celpb.Value_TypeValue:
+ typeName := v.GetTypeValue()
+ tv, ok := typeNameToTypeValue[typeName]
+ if ok {
+ return tv, nil
+ }
+ return types.NewObjectTypeValue(typeName), nil
+ }
+ return nil, errors.New("unknown value")
+}
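+
+// Example round trip between a runtime value and its canonical proto form
+// (a sketch; types.DefaultTypeAdapter is an assumed adapter from the types package):
+//
+//	pb, _ := ValueAsProto(types.String("hello")) // *celpb.Value
+//	val, _ := ProtoAsValue(types.DefaultTypeAdapter, pb)
+//	// val == types.String("hello")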
+
+func convertProto(src, dst proto.Message) error {
+ pb, err := proto.Marshal(src)
+ if err != nil {
+ return err
+ }
+ err = proto.Unmarshal(pb, dst)
+ return err
+}
diff --git a/vendor/github.com/google/cel-go/cel/library.go b/vendor/github.com/google/cel-go/cel/library.go
new file mode 100644
index 000000000..be59f1b02
--- /dev/null
+++ b/vendor/github.com/google/cel-go/cel/library.go
@@ -0,0 +1,790 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cel
+
+import (
+ "math"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/google/cel-go/common/ast"
+ "github.com/google/cel-go/common/operators"
+ "github.com/google/cel-go/common/overloads"
+ "github.com/google/cel-go/common/stdlib"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+ "github.com/google/cel-go/interpreter"
+ "github.com/google/cel-go/parser"
+)
+
+const (
+ optMapMacro = "optMap"
+ optFlatMapMacro = "optFlatMap"
+ hasValueFunc = "hasValue"
+ optionalNoneFunc = "optional.none"
+ optionalOfFunc = "optional.of"
+ optionalOfNonZeroValueFunc = "optional.ofNonZeroValue"
+ valueFunc = "value"
+ unusedIterVar = "#unused"
+)
+
+// Library provides a collection of EnvOption and ProgramOption values used to configure a CEL
+// environment for a particular use case or with a related set of functionality.
+//
+// Note, the ProgramOption values provided by a library are expected to be static and not vary
+// between calls to Env.Program(). If there is a need for such dynamic configuration, prefer to
+// configure these options outside the Library and within the Env.Program() call directly.
+type Library interface {
+ // CompileOptions returns a collection of functional options for configuring the Parse / Check
+ // environment.
+ CompileOptions() []EnvOption
+
+ // ProgramOptions returns a collection of functional options which should be included in every
+ // Program generated from the Env.Program() call.
+ ProgramOptions() []ProgramOption
+}
+
+// SingletonLibrary refines the Library interface to ensure that libraries in this format are only
+// configured once within the environment.
+type SingletonLibrary interface {
+ Library
+
+ // LibraryName provides a namespaced name which is used to check whether the library has already
+ // been configured in the environment.
+ LibraryName() string
+}
+
+// Lib creates an EnvOption out of a Library, allowing libraries to be provided as functional args,
+// and to be linked to each other.
+func Lib(l Library) EnvOption {
+ singleton, isSingleton := l.(SingletonLibrary)
+ return func(e *Env) (*Env, error) {
+ if isSingleton {
+ if e.HasLibrary(singleton.LibraryName()) {
+ return e, nil
+ }
+ e.libraries[singleton.LibraryName()] = true
+ }
+ var err error
+ for _, opt := range l.CompileOptions() {
+ e, err = opt(e)
+ if err != nil {
+ return nil, err
+ }
+ }
+ e.progOpts = append(e.progOpts, l.ProgramOptions()...)
+ return e, nil
+ }
+}
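+
+// Example (sketch): given a hypothetical type myLib implementing CompileOptions
+// and ProgramOptions, the library is applied at environment construction time:
+//
+//	env, err := cel.NewEnv(cel.Lib(myLib{}))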
+
+// StdLib returns an EnvOption for the standard library of CEL functions and macros.
+func StdLib() EnvOption {
+ return Lib(stdLibrary{})
+}
+
+// stdLibrary implements the Library interface and provides functional options for the core CEL
+// features documented in the specification.
+type stdLibrary struct{}
+
+// LibraryName implements the SingletonLibrary interface method.
+func (stdLibrary) LibraryName() string {
+ return "cel.lib.std"
+}
+
+// CompileOptions returns options for the standard CEL function declarations and macros.
+func (stdLibrary) CompileOptions() []EnvOption {
+ return []EnvOption{
+ func(e *Env) (*Env, error) {
+ var err error
+ for _, fn := range stdlib.Functions() {
+ existing, found := e.functions[fn.Name()]
+ if found {
+ fn, err = existing.Merge(fn)
+ if err != nil {
+ return nil, err
+ }
+ }
+ e.functions[fn.Name()] = fn
+ }
+ return e, nil
+ },
+ func(e *Env) (*Env, error) {
+ e.variables = append(e.variables, stdlib.Types()...)
+ return e, nil
+ },
+ Macros(StandardMacros...),
+ }
+}
+
+// ProgramOptions returns function implementations for the standard CEL functions.
+func (stdLibrary) ProgramOptions() []ProgramOption {
+ return []ProgramOption{}
+}
+
+// OptionalTypes enable support for optional syntax and types in CEL.
+//
+// The optional value type makes it possible to express whether variables have
+// been provided, whether a result has been computed, and in the future whether
+// an object field path, map key value, or list index has a value.
+//
+// # Syntax Changes
+//
+// OptionalTypes are unlike other CEL extensions because they modify the CEL
+// syntax itself, notably through the use of a `?` preceding a field name or
+// index value.
+//
+// ## Field Selection
+//
+// The optional syntax in field selection is denoted as `obj.?field`. In other
+// words, if a field is set, return `optional.of(obj.field)`, else
+// `optional.none()`. The optional field selection is viral in the sense that
+// after the first optional selection all subsequent selections or indices
+// are treated as optional, i.e. the following expressions are equivalent:
+//
+// obj.?field.subfield
+// obj.?field.?subfield
+//
+// ## Indexing
+//
+// Similar to field selection, the optional syntax can be used in index
+// expressions on maps and lists:
+//
+// list[?0]
+// map[?key]
+//
+// ## Optional Field Setting
+//
+// When creating map or message literals, if a field may be optionally set
+// based on its presence, then placing a `?` before the field name or key
+// will ensure the type on the right-hand side must be optional(T) where T
+// is the type of the field or key-value.
+//
+// The following returns a map with the key expression set only if the
+// subfield is present, otherwise an empty map is created:
+//
+// {?key: obj.?field.subfield}
+//
+// ## Optional Element Setting
+//
+// When creating list literals, an element in the list may be optionally added
+// when the element expression is preceded by a `?`:
+//
+// [a, ?b, ?c] // return a list with either [a], [a, b], [a, b, c], or [a, c]
+//
+// # Optional.Of
+//
+// Create an optional(T) value of a given value with type T.
+//
+// optional.of(10)
+//
+// # Optional.OfNonZeroValue
+//
+// Create an optional(T) value of a given value with type T if it is not a
+// zero-value. A zero-value is the default empty value for any given CEL type,
+// including empty protobuf message types. If the value is empty, the result
+// of this call will be optional.none().
+//
+// optional.ofNonZeroValue([1, 2, 3]) // optional(list(int))
+// optional.ofNonZeroValue([]) // optional.none()
+// optional.ofNonZeroValue(0) // optional.none()
+// optional.ofNonZeroValue("") // optional.none()
+//
+// # Optional.None
+//
+// Create an empty optional value.
+//
+// # HasValue
+//
+// Determine whether the optional contains a value.
+//
+// optional.of(b'hello').hasValue() // true
+// optional.ofNonZeroValue({}).hasValue() // false
+//
+// # Value
+//
+// Get the value contained by the optional. If the optional does not have a
+// value, the result will be a CEL error.
+//
+// optional.of(b'hello').value() // b'hello'
+// optional.ofNonZeroValue({}).value() // error
+//
+// # Or
+//
+// If the value on the left-hand side is optional.none(), the optional value
+// on the right-hand side is returned. If the value on the left-hand side is
+// valued, then it is returned. This operation is short-circuiting and will
+// only evaluate as many links in the `or` chain as are needed to return a
+// non-empty optional value.
+//
+// obj.?field.or(m[?key])
+// l[?index].or(obj.?field.subfield).or(obj.?other)
+//
+// # OrValue
+//
+// Either return the value contained within the optional on the left-hand side
+// or return the alternative value on the right hand side.
+//
+// m[?key].orValue("none")
+//
+// # OptMap
+//
+// Apply a transformation to the optional's underlying value if it is not empty
+// and return an optional typed result based on the transformation. The
+// transformation expression must return a value of type T, which is wrapped into
+// an optional.
+//
+// msg.?elements.optMap(e, e.size()).orValue(0)
+//
+// # OptFlatMap
+//
+// Introduced in version: 1
+//
+// Apply a transformation to the optional's underlying value if it is not empty
+// and return the result. The transform expression must return an optional(T)
+// rather than type T. This can be useful when dealing with zero values and
+// conditionally generating an empty or non-empty result in ways which cannot
+// be expressed with `optMap`.
+//
+// msg.?elements.optFlatMap(e, e[?0]) // return the first element if present.
+func OptionalTypes(opts ...OptionalTypesOption) EnvOption {
+ lib := &optionalLib{version: math.MaxUint32}
+ for _, opt := range opts {
+ lib = opt(lib)
+ }
+ return Lib(lib)
+}
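+
+// Illustrative usage (a sketch; the variable declaration is hypothetical):
+//
+//	env, _ := cel.NewEnv(
+//		cel.OptionalTypes(),
+//		cel.Variable("m", cel.MapType(cel.StringType, cel.StringType)),
+//	)
+//	checked, _ := env.Compile(`m[?"key"].orValue("none")`)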
+
+type optionalLib struct {
+ version uint32
+}
+
+// OptionalTypesOption is a functional interface for configuring the optional types library.
+type OptionalTypesOption func(*optionalLib) *optionalLib
+
+// OptionalTypesVersion configures the version of the optional type library.
+//
+// The version limits which functions are available. Only functions introduced at
+// or below the given version are included in the library. If this option
+// is not set, all functions are available.
+//
+// See the library documentation to determine which version a function was introduced.
+// If the documentation does not state which version a function was introduced, it can
+// be assumed to be introduced at version 0, when the library was first created.
+func OptionalTypesVersion(version uint32) OptionalTypesOption {
+ return func(lib *optionalLib) *optionalLib {
+ lib.version = version
+ return lib
+ }
+}
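+
+// For example, pinning the library to version 0 excludes optFlatMap, which was
+// introduced in version 1:
+//
+//	env, _ := cel.NewEnv(cel.OptionalTypes(cel.OptionalTypesVersion(0)))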
+
+// LibraryName implements the SingletonLibrary interface method.
+func (lib *optionalLib) LibraryName() string {
+ return "cel.lib.optional"
+}
+
+// CompileOptions implements the Library interface method.
+func (lib *optionalLib) CompileOptions() []EnvOption {
+ paramTypeK := TypeParamType("K")
+ paramTypeV := TypeParamType("V")
+ optionalTypeV := OptionalType(paramTypeV)
+ listTypeV := ListType(paramTypeV)
+ mapTypeKV := MapType(paramTypeK, paramTypeV)
+
+ opts := []EnvOption{
+ // Enable the optional syntax in the parser.
+ enableOptionalSyntax(),
+
+ // Introduce the optional type.
+ Types(types.OptionalType),
+
+ // Configure the optMap and optFlatMap macros.
+ Macros(ReceiverMacro(optMapMacro, 2, optMap)),
+
+ // Global and member functions for working with optional values.
+ Function(optionalOfFunc,
+ Overload("optional_of", []*Type{paramTypeV}, optionalTypeV,
+ UnaryBinding(func(value ref.Val) ref.Val {
+ return types.OptionalOf(value)
+ }))),
+ Function(optionalOfNonZeroValueFunc,
+ Overload("optional_ofNonZeroValue", []*Type{paramTypeV}, optionalTypeV,
+ UnaryBinding(func(value ref.Val) ref.Val {
+ v, isZeroer := value.(traits.Zeroer)
+ if !isZeroer || !v.IsZeroValue() {
+ return types.OptionalOf(value)
+ }
+ return types.OptionalNone
+ }))),
+ Function(optionalNoneFunc,
+ Overload("optional_none", []*Type{}, optionalTypeV,
+ FunctionBinding(func(values ...ref.Val) ref.Val {
+ return types.OptionalNone
+ }))),
+ Function(valueFunc,
+ MemberOverload("optional_value", []*Type{optionalTypeV}, paramTypeV,
+ UnaryBinding(func(value ref.Val) ref.Val {
+ opt := value.(*types.Optional)
+ return opt.GetValue()
+ }))),
+ Function(hasValueFunc,
+ MemberOverload("optional_hasValue", []*Type{optionalTypeV}, BoolType,
+ UnaryBinding(func(value ref.Val) ref.Val {
+ opt := value.(*types.Optional)
+ return types.Bool(opt.HasValue())
+ }))),
+
+ // Implementations of 'or' and 'orValue' are special-cased to support short-circuiting in the
+ // evaluation chain.
+ Function("or",
+ MemberOverload("optional_or_optional", []*Type{optionalTypeV, optionalTypeV}, optionalTypeV)),
+ Function("orValue",
+ MemberOverload("optional_orValue_value", []*Type{optionalTypeV, paramTypeV}, paramTypeV)),
+
+ // OptSelect is handled specially by the type-checker, so the receiver's field type is used to determine the
+ // output type.
+ Function(operators.OptSelect,
+ Overload("select_optional_field", []*Type{DynType, StringType}, optionalTypeV)),
+
+ // OptIndex is handled mostly like any other indexing operation on a list or map, so the type-checker can use
+ // these signatures to determine type-agreement without any special handling.
+ Function(operators.OptIndex,
+ Overload("list_optindex_optional_int", []*Type{listTypeV, IntType}, optionalTypeV),
+ Overload("optional_list_optindex_optional_int", []*Type{OptionalType(listTypeV), IntType}, optionalTypeV),
+ Overload("map_optindex_optional_value", []*Type{mapTypeKV, paramTypeK}, optionalTypeV),
+ Overload("optional_map_optindex_optional_value", []*Type{OptionalType(mapTypeKV), paramTypeK}, optionalTypeV)),
+
+ // Index overloads to accommodate using an optional value as the operand.
+ Function(operators.Index,
+ Overload("optional_list_index_int", []*Type{OptionalType(listTypeV), IntType}, optionalTypeV),
+ Overload("optional_map_index_value", []*Type{OptionalType(mapTypeKV), paramTypeK}, optionalTypeV)),
+ }
+ if lib.version >= 1 {
+ opts = append(opts, Macros(ReceiverMacro(optFlatMapMacro, 2, optFlatMap)))
+ }
+ return opts
+}
+
+// ProgramOptions implements the Library interface method.
+func (lib *optionalLib) ProgramOptions() []ProgramOption {
+ return []ProgramOption{
+ CustomDecorator(decorateOptionalOr),
+ }
+}
+
+func optMap(meh MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *Error) {
+ varIdent := args[0]
+ varName := ""
+ switch varIdent.Kind() {
+ case ast.IdentKind:
+ varName = varIdent.AsIdent()
+ default:
+ return nil, meh.NewError(varIdent.ID(), "optMap() variable name must be a simple identifier")
+ }
+ mapExpr := args[1]
+ return meh.NewCall(
+ operators.Conditional,
+ meh.NewMemberCall(hasValueFunc, target),
+ meh.NewCall(optionalOfFunc,
+ meh.NewComprehension(
+ meh.NewList(),
+ unusedIterVar,
+ varName,
+ meh.NewMemberCall(valueFunc, meh.Copy(target)),
+ meh.NewLiteral(types.False),
+ meh.NewIdent(varName),
+ mapExpr,
+ ),
+ ),
+ meh.NewCall(optionalNoneFunc),
+ ), nil
+}
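+
+// By way of example, `x.optMap(v, v + 1)` expands to roughly the following
+// (a sketch of the conditional built above):
+//
+//	x.hasValue()
+//	    ? optional.of(<comprehension binding v := x.value(), yielding v + 1>)
+//	    : optional.none()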
+
+func optFlatMap(meh MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *Error) {
+ varIdent := args[0]
+ varName := ""
+ switch varIdent.Kind() {
+ case ast.IdentKind:
+ varName = varIdent.AsIdent()
+ default:
+ return nil, meh.NewError(varIdent.ID(), "optFlatMap() variable name must be a simple identifier")
+ }
+ mapExpr := args[1]
+ return meh.NewCall(
+ operators.Conditional,
+ meh.NewMemberCall(hasValueFunc, target),
+ meh.NewComprehension(
+ meh.NewList(),
+ unusedIterVar,
+ varName,
+ meh.NewMemberCall(valueFunc, meh.Copy(target)),
+ meh.NewLiteral(types.False),
+ meh.NewIdent(varName),
+ mapExpr,
+ ),
+ meh.NewCall(optionalNoneFunc),
+ ), nil
+}
+
+func enableOptionalSyntax() EnvOption {
+ return func(e *Env) (*Env, error) {
+ e.prsrOpts = append(e.prsrOpts, parser.EnableOptionalSyntax(true))
+ return e, nil
+ }
+}
+
+// EnableErrorOnBadPresenceTest enables error generation when a presence test or optional field
+// selection is performed on a primitive type.
+func EnableErrorOnBadPresenceTest(value bool) EnvOption {
+ return features(featureEnableErrorOnBadPresenceTest, value)
+}
+
+func decorateOptionalOr(i interpreter.Interpretable) (interpreter.Interpretable, error) {
+ call, ok := i.(interpreter.InterpretableCall)
+ if !ok {
+ return i, nil
+ }
+ args := call.Args()
+ if len(args) != 2 {
+ return i, nil
+ }
+ switch call.Function() {
+ case "or":
+ if call.OverloadID() != "" && call.OverloadID() != "optional_or_optional" {
+ return i, nil
+ }
+ return &evalOptionalOr{
+ id: call.ID(),
+ lhs: args[0],
+ rhs: args[1],
+ }, nil
+ case "orValue":
+ if call.OverloadID() != "" && call.OverloadID() != "optional_orValue_value" {
+ return i, nil
+ }
+ return &evalOptionalOrValue{
+ id: call.ID(),
+ lhs: args[0],
+ rhs: args[1],
+ }, nil
+ default:
+ return i, nil
+ }
+}
+
+// evalOptionalOr selects between two optional values: the first is returned if it has a
+// value; otherwise the second optional expression is evaluated and returned.
+type evalOptionalOr struct {
+ id int64
+ lhs interpreter.Interpretable
+ rhs interpreter.Interpretable
+}
+
+// ID implements the Interpretable interface method.
+func (opt *evalOptionalOr) ID() int64 {
+ return opt.id
+}
+
+// Eval evaluates the left-hand side optional to determine whether it contains a value, else
+// proceeds with the right-hand side evaluation.
+func (opt *evalOptionalOr) Eval(ctx interpreter.Activation) ref.Val {
+ // short-circuit lhs.
+ optLHS := opt.lhs.Eval(ctx)
+ optVal, ok := optLHS.(*types.Optional)
+ if !ok {
+ return optLHS
+ }
+ if optVal.HasValue() {
+ return optVal
+ }
+ return opt.rhs.Eval(ctx)
+}
+
+// evalOptionalOrValue selects between an optional and a concrete value. If the optional has a value,
+// its value is returned, otherwise the alternative value expression is evaluated and returned.
+type evalOptionalOrValue struct {
+ id int64
+ lhs interpreter.Interpretable
+ rhs interpreter.Interpretable
+}
+
+// ID implements the Interpretable interface method.
+func (opt *evalOptionalOrValue) ID() int64 {
+ return opt.id
+}
+
+// Eval evaluates the left-hand side optional to determine whether it contains a value, else
+// proceeds with the right-hand side evaluation.
+func (opt *evalOptionalOrValue) Eval(ctx interpreter.Activation) ref.Val {
+ // short-circuit lhs.
+ optLHS := opt.lhs.Eval(ctx)
+ optVal, ok := optLHS.(*types.Optional)
+ if !ok {
+ return optLHS
+ }
+ if optVal.HasValue() {
+ return optVal.GetValue()
+ }
+ return opt.rhs.Eval(ctx)
+}
+
+type timeUTCLibrary struct{}
+
+func (timeUTCLibrary) CompileOptions() []EnvOption {
+ return timeOverloadDeclarations
+}
+
+func (timeUTCLibrary) ProgramOptions() []ProgramOption {
+ return []ProgramOption{}
+}
+
+// Declarations and functions which enable using UTC on time.Time inputs when the timezone is unspecified
+// in the CEL expression.
+var (
+ utcTZ = types.String("UTC")
+
+ timeOverloadDeclarations = []EnvOption{
+ Function(overloads.TimeGetHours,
+ MemberOverload(overloads.DurationToHours, []*Type{DurationType}, IntType,
+ UnaryBinding(types.DurationGetHours))),
+ Function(overloads.TimeGetMinutes,
+ MemberOverload(overloads.DurationToMinutes, []*Type{DurationType}, IntType,
+ UnaryBinding(types.DurationGetMinutes))),
+ Function(overloads.TimeGetSeconds,
+ MemberOverload(overloads.DurationToSeconds, []*Type{DurationType}, IntType,
+ UnaryBinding(types.DurationGetSeconds))),
+ Function(overloads.TimeGetMilliseconds,
+ MemberOverload(overloads.DurationToMilliseconds, []*Type{DurationType}, IntType,
+ UnaryBinding(types.DurationGetMilliseconds))),
+ Function(overloads.TimeGetFullYear,
+ MemberOverload(overloads.TimestampToYear, []*Type{TimestampType}, IntType,
+ UnaryBinding(func(ts ref.Val) ref.Val {
+ return timestampGetFullYear(ts, utcTZ)
+ }),
+ ),
+ MemberOverload(overloads.TimestampToYearWithTz, []*Type{TimestampType, StringType}, IntType,
+ BinaryBinding(timestampGetFullYear),
+ ),
+ ),
+ Function(overloads.TimeGetMonth,
+ MemberOverload(overloads.TimestampToMonth, []*Type{TimestampType}, IntType,
+ UnaryBinding(func(ts ref.Val) ref.Val {
+ return timestampGetMonth(ts, utcTZ)
+ }),
+ ),
+ MemberOverload(overloads.TimestampToMonthWithTz, []*Type{TimestampType, StringType}, IntType,
+ BinaryBinding(timestampGetMonth),
+ ),
+ ),
+ Function(overloads.TimeGetDayOfYear,
+ MemberOverload(overloads.TimestampToDayOfYear, []*Type{TimestampType}, IntType,
+ UnaryBinding(func(ts ref.Val) ref.Val {
+ return timestampGetDayOfYear(ts, utcTZ)
+ }),
+ ),
+ MemberOverload(overloads.TimestampToDayOfYearWithTz, []*Type{TimestampType, StringType}, IntType,
+ BinaryBinding(func(ts, tz ref.Val) ref.Val {
+ return timestampGetDayOfYear(ts, tz)
+ }),
+ ),
+ ),
+ Function(overloads.TimeGetDayOfMonth,
+ MemberOverload(overloads.TimestampToDayOfMonthZeroBased, []*Type{TimestampType}, IntType,
+ UnaryBinding(func(ts ref.Val) ref.Val {
+ return timestampGetDayOfMonthZeroBased(ts, utcTZ)
+ }),
+ ),
+ MemberOverload(overloads.TimestampToDayOfMonthZeroBasedWithTz, []*Type{TimestampType, StringType}, IntType,
+ BinaryBinding(timestampGetDayOfMonthZeroBased),
+ ),
+ ),
+ Function(overloads.TimeGetDate,
+ MemberOverload(overloads.TimestampToDayOfMonthOneBased, []*Type{TimestampType}, IntType,
+ UnaryBinding(func(ts ref.Val) ref.Val {
+ return timestampGetDayOfMonthOneBased(ts, utcTZ)
+ }),
+ ),
+ MemberOverload(overloads.TimestampToDayOfMonthOneBasedWithTz, []*Type{TimestampType, StringType}, IntType,
+ BinaryBinding(timestampGetDayOfMonthOneBased),
+ ),
+ ),
+ Function(overloads.TimeGetDayOfWeek,
+ MemberOverload(overloads.TimestampToDayOfWeek, []*Type{TimestampType}, IntType,
+ UnaryBinding(func(ts ref.Val) ref.Val {
+ return timestampGetDayOfWeek(ts, utcTZ)
+ }),
+ ),
+ MemberOverload(overloads.TimestampToDayOfWeekWithTz, []*Type{TimestampType, StringType}, IntType,
+ BinaryBinding(timestampGetDayOfWeek),
+ ),
+ ),
+ Function(overloads.TimeGetHours,
+ MemberOverload(overloads.TimestampToHours, []*Type{TimestampType}, IntType,
+ UnaryBinding(func(ts ref.Val) ref.Val {
+ return timestampGetHours(ts, utcTZ)
+ }),
+ ),
+ MemberOverload(overloads.TimestampToHoursWithTz, []*Type{TimestampType, StringType}, IntType,
+ BinaryBinding(timestampGetHours),
+ ),
+ ),
+ Function(overloads.TimeGetMinutes,
+ MemberOverload(overloads.TimestampToMinutes, []*Type{TimestampType}, IntType,
+ UnaryBinding(func(ts ref.Val) ref.Val {
+ return timestampGetMinutes(ts, utcTZ)
+ }),
+ ),
+ MemberOverload(overloads.TimestampToMinutesWithTz, []*Type{TimestampType, StringType}, IntType,
+ BinaryBinding(timestampGetMinutes),
+ ),
+ ),
+ Function(overloads.TimeGetSeconds,
+ MemberOverload(overloads.TimestampToSeconds, []*Type{TimestampType}, IntType,
+ UnaryBinding(func(ts ref.Val) ref.Val {
+ return timestampGetSeconds(ts, utcTZ)
+ }),
+ ),
+ MemberOverload(overloads.TimestampToSecondsWithTz, []*Type{TimestampType, StringType}, IntType,
+ BinaryBinding(timestampGetSeconds),
+ ),
+ ),
+ Function(overloads.TimeGetMilliseconds,
+ MemberOverload(overloads.TimestampToMilliseconds, []*Type{TimestampType}, IntType,
+ UnaryBinding(func(ts ref.Val) ref.Val {
+ return timestampGetMilliseconds(ts, utcTZ)
+ }),
+ ),
+ MemberOverload(overloads.TimestampToMillisecondsWithTz, []*Type{TimestampType, StringType}, IntType,
+ BinaryBinding(timestampGetMilliseconds),
+ ),
+ ),
+ }
+)
+
+func timestampGetFullYear(ts, tz ref.Val) ref.Val {
+ t, err := inTimeZone(ts, tz)
+ if err != nil {
+ return types.NewErr(err.Error())
+ }
+ return types.Int(t.Year())
+}
+
+func timestampGetMonth(ts, tz ref.Val) ref.Val {
+ t, err := inTimeZone(ts, tz)
+ if err != nil {
+ return types.NewErr(err.Error())
+ }
+ // CEL spec indicates that the month should be 0-based, but the Time value
+ // for Month() is 1-based.
+ return types.Int(t.Month() - 1)
+}
+
+func timestampGetDayOfYear(ts, tz ref.Val) ref.Val {
+ t, err := inTimeZone(ts, tz)
+ if err != nil {
+ return types.NewErr(err.Error())
+ }
+ return types.Int(t.YearDay() - 1)
+}
+
+func timestampGetDayOfMonthZeroBased(ts, tz ref.Val) ref.Val {
+ t, err := inTimeZone(ts, tz)
+ if err != nil {
+ return types.NewErr(err.Error())
+ }
+ return types.Int(t.Day() - 1)
+}
+
+func timestampGetDayOfMonthOneBased(ts, tz ref.Val) ref.Val {
+ t, err := inTimeZone(ts, tz)
+ if err != nil {
+ return types.NewErr(err.Error())
+ }
+ return types.Int(t.Day())
+}
+
+func timestampGetDayOfWeek(ts, tz ref.Val) ref.Val {
+ t, err := inTimeZone(ts, tz)
+ if err != nil {
+ return types.NewErr(err.Error())
+ }
+ return types.Int(t.Weekday())
+}
+
+func timestampGetHours(ts, tz ref.Val) ref.Val {
+ t, err := inTimeZone(ts, tz)
+ if err != nil {
+ return types.NewErr(err.Error())
+ }
+ return types.Int(t.Hour())
+}
+
+func timestampGetMinutes(ts, tz ref.Val) ref.Val {
+ t, err := inTimeZone(ts, tz)
+ if err != nil {
+ return types.NewErr(err.Error())
+ }
+ return types.Int(t.Minute())
+}
+
+func timestampGetSeconds(ts, tz ref.Val) ref.Val {
+ t, err := inTimeZone(ts, tz)
+ if err != nil {
+ return types.NewErr(err.Error())
+ }
+ return types.Int(t.Second())
+}
+
+func timestampGetMilliseconds(ts, tz ref.Val) ref.Val {
+ t, err := inTimeZone(ts, tz)
+ if err != nil {
+ return types.NewErr(err.Error())
+ }
+ return types.Int(t.Nanosecond() / 1000000)
+}
+
+func inTimeZone(ts, tz ref.Val) (time.Time, error) {
+ t := ts.(types.Timestamp)
+ val := string(tz.(types.String))
+ ind := strings.Index(val, ":")
+ if ind == -1 {
+ loc, err := time.LoadLocation(val)
+ if err != nil {
+ return time.Time{}, err
+ }
+ return t.In(loc), nil
+ }
+
+ // If the input is not the name of a timezone (for example, 'US/Central'), it should be a numerical offset from UTC
+ // in the format ^(+|-)(0[0-9]|1[0-4]):[0-5][0-9]$. The numerical input is parsed in terms of hours and minutes.
+ hr, err := strconv.Atoi(string(val[0:ind]))
+ if err != nil {
+ return time.Time{}, err
+ }
+ min, err := strconv.Atoi(string(val[ind+1:]))
+ if err != nil {
+ return time.Time{}, err
+ }
+ var offset int
+ if string(val[0]) == "-" {
+ offset = hr*60 - min
+ } else {
+ offset = hr*60 + min
+ }
+ secondsEastOfUTC := int((time.Duration(offset) * time.Minute).Seconds())
+ timezone := time.FixedZone("", secondsEastOfUTC)
+ return t.In(timezone), nil
+}
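+
+// For example (illustrative inputs):
+//
+//	inTimeZone(ts, types.String("US/Central")) // named zone via time.LoadLocation
+//	inTimeZone(ts, types.String("+05:30"))     // fixed zone, 5h30m east of UTC
+//	inTimeZone(ts, types.String("-08:00"))     // fixed zone, 8h west of UTC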
diff --git a/vendor/github.com/google/cel-go/cel/macro.go b/vendor/github.com/google/cel-go/cel/macro.go
new file mode 100644
index 000000000..4db1fd57a
--- /dev/null
+++ b/vendor/github.com/google/cel-go/cel/macro.go
@@ -0,0 +1,576 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cel
+
+import (
+ "fmt"
+
+ "github.com/google/cel-go/common"
+ "github.com/google/cel-go/common/ast"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/parser"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+// Macro describes a function signature to match and the MacroExpander to apply.
+//
+// Note: when a Macro should apply to multiple overloads (based on arg count) of a given function,
+// a Macro should be created per arg-count or as a var arg macro.
+type Macro = parser.Macro
+
+// MacroFactory defines an expansion function which converts a call and its arguments to a cel.Expr value.
+type MacroFactory = parser.MacroExpander
+
+// MacroExprFactory assists with the creation of Expr values in a manner which is consistent
+// with the internal semantics and id generation behaviors of the parser and checker libraries.
+type MacroExprFactory = parser.ExprHelper
+
+// MacroExpander converts a call and its associated arguments into a protobuf Expr representation.
+//
+// If the MacroExpander determines within the implementation that an expansion is not needed it may return
+// a nil Expr value to indicate a non-match. However, if an expansion is to be performed, but the arguments
+// are not well-formed, the result of the expansion will be an error.
+//
+// The MacroExpander accepts as arguments a MacroExprHelper as well as the arguments used in the function call
+// and produces as output an Expr ast node.
+//
+// Note: when the Macro.IsReceiverStyle() method returns true, the target argument will be nil.
+type MacroExpander func(eh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error)
+
+// MacroExprHelper exposes helper methods for creating new expressions within a CEL abstract syntax tree.
+// ExprHelper assists with the manipulation of proto-based Expr values in a manner which is
+// consistent with the source position and expression id generation code leveraged by both
+// the parser and type-checker.
+type MacroExprHelper interface {
+ // Copy the input expression with a brand new set of identifiers.
+ Copy(*exprpb.Expr) *exprpb.Expr
+
+ // LiteralBool creates an Expr value for a bool literal.
+ LiteralBool(value bool) *exprpb.Expr
+
+ // LiteralBytes creates an Expr value for a byte literal.
+ LiteralBytes(value []byte) *exprpb.Expr
+
+ // LiteralDouble creates an Expr value for a double literal.
+ LiteralDouble(value float64) *exprpb.Expr
+
+ // LiteralInt creates an Expr value for an int literal.
+ LiteralInt(value int64) *exprpb.Expr
+
+ // LiteralString creates an Expr value for a string literal.
+ LiteralString(value string) *exprpb.Expr
+
+ // LiteralUint creates an Expr value for a uint literal.
+ LiteralUint(value uint64) *exprpb.Expr
+
+ // NewList creates a CreateList instruction where the list is comprised of the optional set
+ // of elements provided as arguments.
+ NewList(elems ...*exprpb.Expr) *exprpb.Expr
+
+ // NewMap creates a CreateStruct instruction for a map where the map is comprised of the
+ // optional set of key, value entries.
+ NewMap(entries ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr
+
+ // NewMapEntry creates a Map Entry for the key, value pair.
+ NewMapEntry(key *exprpb.Expr, val *exprpb.Expr, optional bool) *exprpb.Expr_CreateStruct_Entry
+
+ // NewObject creates a CreateStruct instruction for an object with a given type name and
+ // optional set of field initializers.
+ NewObject(typeName string, fieldInits ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr
+
+ // NewObjectFieldInit creates a new Object field initializer from the field name and value.
+ NewObjectFieldInit(field string, init *exprpb.Expr, optional bool) *exprpb.Expr_CreateStruct_Entry
+
+ // Fold creates a fold comprehension instruction.
+ //
+ // - iterVar is the iteration variable name.
+ // - iterRange represents the expression that resolves to a list or map where the elements or
+ // keys (respectively) will be iterated over.
+ // - accuVar is the accumulation variable name, typically parser.AccumulatorName.
+ // - accuInit is the initial expression whose value will be set for the accuVar prior to
+ // folding.
+ // - condition is the expression to test to determine whether to continue folding.
+ // - step is the expression to evaluate at the conclusion of a single fold iteration.
+ // - result is the computation to evaluate at the conclusion of the fold.
+ //
+ // The accuVar should not shadow variable names that you would like to reference within the
+ // environment in the step and condition expressions. Presently, the name __result__ is commonly
+ // used by built-in macros but this may change in the future.
+ Fold(iterVar string,
+ iterRange *exprpb.Expr,
+ accuVar string,
+ accuInit *exprpb.Expr,
+ condition *exprpb.Expr,
+ step *exprpb.Expr,
+ result *exprpb.Expr) *exprpb.Expr
+
+ // Ident creates an identifier Expr value.
+ Ident(name string) *exprpb.Expr
+
+ // AccuIdent returns an accumulator identifier for use with comprehension results.
+ AccuIdent() *exprpb.Expr
+
+ // GlobalCall creates a function call Expr value for a global (free) function.
+ GlobalCall(function string, args ...*exprpb.Expr) *exprpb.Expr
+
+ // ReceiverCall creates a function call Expr value for a receiver-style function.
+ ReceiverCall(function string, target *exprpb.Expr, args ...*exprpb.Expr) *exprpb.Expr
+
+ // PresenceTest creates a Select TestOnly Expr value for modelling has() semantics.
+ PresenceTest(operand *exprpb.Expr, field string) *exprpb.Expr
+
+ // Select create a field traversal Expr value.
+ Select(operand *exprpb.Expr, field string) *exprpb.Expr
+
+ // OffsetLocation returns the Location of the expression identifier.
+ OffsetLocation(exprID int64) common.Location
+
+ // NewError associates an error message with a given expression id.
+ NewError(exprID int64, message string) *Error
+}
+
+// GlobalMacro creates a Macro for a global function with the specified arg count.
+func GlobalMacro(function string, argCount int, factory MacroFactory) Macro {
+ return parser.NewGlobalMacro(function, argCount, factory)
+}
+
+// ReceiverMacro creates a Macro for a receiver function matching the specified arg count.
+func ReceiverMacro(function string, argCount int, factory MacroFactory) Macro {
+ return parser.NewReceiverMacro(function, argCount, factory)
+}
+
+// GlobalVarArgMacro creates a Macro for a global function with a variable arg count.
+func GlobalVarArgMacro(function string, factory MacroFactory) Macro {
+ return parser.NewGlobalVarArgMacro(function, factory)
+}
+
+// ReceiverVarArgMacro creates a Macro for a receiver function matching a variable arg count.
+func ReceiverVarArgMacro(function string, factory MacroFactory) Macro {
+ return parser.NewReceiverVarArgMacro(function, factory)
+}
+
+// NewGlobalMacro creates a Macro for a global function with the specified arg count.
+//
+// Deprecated: use GlobalMacro
+func NewGlobalMacro(function string, argCount int, expander MacroExpander) Macro {
+ expand := adaptingExpander{expander}
+ return parser.NewGlobalMacro(function, argCount, expand.Expander)
+}
+
+// NewReceiverMacro creates a Macro for a receiver function matching the specified arg count.
+//
+// Deprecated: use ReceiverMacro
+func NewReceiverMacro(function string, argCount int, expander MacroExpander) Macro {
+ expand := adaptingExpander{expander}
+ return parser.NewReceiverMacro(function, argCount, expand.Expander)
+}
+
+// NewGlobalVarArgMacro creates a Macro for a global function with a variable arg count.
+//
+// Deprecated: use GlobalVarArgMacro
+func NewGlobalVarArgMacro(function string, expander MacroExpander) Macro {
+ expand := adaptingExpander{expander}
+ return parser.NewGlobalVarArgMacro(function, expand.Expander)
+}
+
+// NewReceiverVarArgMacro creates a Macro for a receiver function matching a variable arg count.
+//
+// Deprecated: use ReceiverVarArgMacro
+func NewReceiverVarArgMacro(function string, expander MacroExpander) Macro {
+ expand := adaptingExpander{expander}
+ return parser.NewReceiverVarArgMacro(function, expand.Expander)
+}
+
+// HasMacroExpander expands the input call arguments into a presence test, e.g. has(<operand>.field)
+func HasMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) {
+ ph, err := toParserHelper(meh)
+ if err != nil {
+ return nil, err
+ }
+ arg, err := adaptToExpr(args[0])
+ if err != nil {
+ return nil, err
+ }
+ if arg.Kind() == ast.SelectKind {
+ s := arg.AsSelect()
+ return adaptToProto(ph.NewPresenceTest(s.Operand(), s.FieldName()))
+ }
+ return nil, ph.NewError(arg.ID(), "invalid argument to has() macro")
+}
+
+// ExistsMacroExpander expands the input call arguments into a comprehension that returns true if any of the
+// elements in the range match the predicate expressions:
+// <iterRange>.exists(<iterVar>, <predicate>)
+func ExistsMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) {
+ ph, err := toParserHelper(meh)
+ if err != nil {
+ return nil, err
+ }
+ out, err := parser.MakeExists(ph, mustAdaptToExpr(target), mustAdaptToExprs(args))
+ if err != nil {
+ return nil, err
+ }
+ return adaptToProto(out)
+}
+
+// ExistsOneMacroExpander expands the input call arguments into a comprehension that returns true if exactly
+// one of the elements in the range match the predicate expressions:
+// <iterRange>.exists_one(<iterVar>, <predicate>)
+func ExistsOneMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) {
+ ph, err := toParserHelper(meh)
+ if err != nil {
+ return nil, err
+ }
+ out, err := parser.MakeExistsOne(ph, mustAdaptToExpr(target), mustAdaptToExprs(args))
+ if err != nil {
+ return nil, err
+ }
+ return adaptToProto(out)
+}
+
+// MapMacroExpander expands the input call arguments into a comprehension that transforms each element in the
+// input to produce an output list.
+//
+// There are two call patterns supported by map:
+//
+// <iterRange>.map(<iterVar>, <transform>)
+// <iterRange>.map(<iterVar>, <predicate>, <transform>)
+//
+// In the second form only iterVar values which return true when provided to the predicate expression
+// are transformed.
+func MapMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) {
+ ph, err := toParserHelper(meh)
+ if err != nil {
+ return nil, err
+ }
+ out, err := parser.MakeMap(ph, mustAdaptToExpr(target), mustAdaptToExprs(args))
+ if err != nil {
+ return nil, err
+ }
+ return adaptToProto(out)
+}
+
+// FilterMacroExpander expands the input call arguments into a comprehension which produces a list which contains
+// only elements which match the provided predicate expression:
+// <iterRange>.filter(<iterVar>, <predicate>)
+func FilterMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) {
+ ph, err := toParserHelper(meh)
+ if err != nil {
+ return nil, err
+ }
+ out, err := parser.MakeFilter(ph, mustAdaptToExpr(target), mustAdaptToExprs(args))
+ if err != nil {
+ return nil, err
+ }
+ return adaptToProto(out)
+}
+
+var (
+ // Aliases to each macro in the CEL standard environment.
+ // Note: reassigning these macro variables may result in undefined behavior.
+
+ // HasMacro expands "has(m.f)" which tests the presence of a field, avoiding the need to
+ // specify the field as a string.
+ HasMacro = parser.HasMacro
+
+ // AllMacro expands "range.all(var, predicate)" into a comprehension which ensures that all
+ // elements in the range satisfy the predicate.
+ AllMacro = parser.AllMacro
+
+ // ExistsMacro expands "range.exists(var, predicate)" into a comprehension which ensures that
+ // some element in the range satisfies the predicate.
+ ExistsMacro = parser.ExistsMacro
+
+ // ExistsOneMacro expands "range.exists_one(var, predicate)", which is true if for exactly one
+ // element in range the predicate holds.
+ ExistsOneMacro = parser.ExistsOneMacro
+
+ // MapMacro expands "range.map(var, function)" into a comprehension which applies the function
+ // to each element in the range to produce a new list.
+ MapMacro = parser.MapMacro
+
+ // MapFilterMacro expands "range.map(var, predicate, function)" into a comprehension which
+ // first filters the elements in the range by the predicate, then applies the transform function
+ // to produce a new list.
+ MapFilterMacro = parser.MapFilterMacro
+
+ // FilterMacro expands "range.filter(var, predicate)" into a comprehension which filters
+ // elements in the range, producing a new list from the elements that satisfy the predicate.
+ FilterMacro = parser.FilterMacro
+
+ // StandardMacros provides an alias to all the CEL macros defined in the standard environment.
+ StandardMacros = []Macro{
+ HasMacro, AllMacro, ExistsMacro, ExistsOneMacro, MapMacro, MapFilterMacro, FilterMacro,
+ }
+
+ // NoMacros provides an alias to an empty list of macros
+ NoMacros = []Macro{}
+)
+
+type adaptingExpander struct {
+ legacyExpander MacroExpander
+}
+
+func (adapt *adaptingExpander) Expander(eh parser.ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) {
+ var legacyTarget *exprpb.Expr = nil
+ var err *Error = nil
+ if target != nil {
+ legacyTarget, err = adaptToProto(target)
+ if err != nil {
+ return nil, err
+ }
+ }
+ legacyArgs := make([]*exprpb.Expr, len(args))
+ for i, arg := range args {
+ legacyArgs[i], err = adaptToProto(arg)
+ if err != nil {
+ return nil, err
+ }
+ }
+ ah := &adaptingHelper{modernHelper: eh}
+ legacyExpr, err := adapt.legacyExpander(ah, legacyTarget, legacyArgs)
+ if err != nil {
+ return nil, err
+ }
+ ex, err := adaptToExpr(legacyExpr)
+ if err != nil {
+ return nil, err
+ }
+ return ex, nil
+}
+
+func wrapErr(id int64, message string, err error) *common.Error {
+ return &common.Error{
+ Location: common.NoLocation,
+ Message: fmt.Sprintf("%s: %v", message, err),
+ ExprID: id,
+ }
+}
+
+type adaptingHelper struct {
+ modernHelper parser.ExprHelper
+}
+
+// Copy the input expression with a brand new set of identifiers.
+func (ah *adaptingHelper) Copy(e *exprpb.Expr) *exprpb.Expr {
+ return mustAdaptToProto(ah.modernHelper.Copy(mustAdaptToExpr(e)))
+}
+
+// LiteralBool creates an Expr value for a bool literal.
+func (ah *adaptingHelper) LiteralBool(value bool) *exprpb.Expr {
+ return mustAdaptToProto(ah.modernHelper.NewLiteral(types.Bool(value)))
+}
+
+// LiteralBytes creates an Expr value for a byte literal.
+func (ah *adaptingHelper) LiteralBytes(value []byte) *exprpb.Expr {
+ return mustAdaptToProto(ah.modernHelper.NewLiteral(types.Bytes(value)))
+}
+
+// LiteralDouble creates an Expr value for a double literal.
+func (ah *adaptingHelper) LiteralDouble(value float64) *exprpb.Expr {
+ return mustAdaptToProto(ah.modernHelper.NewLiteral(types.Double(value)))
+}
+
+// LiteralInt creates an Expr value for an int literal.
+func (ah *adaptingHelper) LiteralInt(value int64) *exprpb.Expr {
+ return mustAdaptToProto(ah.modernHelper.NewLiteral(types.Int(value)))
+}
+
+// LiteralString creates an Expr value for a string literal.
+func (ah *adaptingHelper) LiteralString(value string) *exprpb.Expr {
+ return mustAdaptToProto(ah.modernHelper.NewLiteral(types.String(value)))
+}
+
+// LiteralUint creates an Expr value for a uint literal.
+func (ah *adaptingHelper) LiteralUint(value uint64) *exprpb.Expr {
+ return mustAdaptToProto(ah.modernHelper.NewLiteral(types.Uint(value)))
+}
+
+// NewList creates a CreateList instruction where the list is comprised of the optional set
+// of elements provided as arguments.
+func (ah *adaptingHelper) NewList(elems ...*exprpb.Expr) *exprpb.Expr {
+ return mustAdaptToProto(ah.modernHelper.NewList(mustAdaptToExprs(elems)...))
+}
+
+// NewMap creates a CreateStruct instruction for a map where the map is comprised of the
+// optional set of key, value entries.
+func (ah *adaptingHelper) NewMap(entries ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr {
+ adaptedEntries := make([]ast.EntryExpr, len(entries))
+ for i, e := range entries {
+ adaptedEntries[i] = mustAdaptToEntryExpr(e)
+ }
+ return mustAdaptToProto(ah.modernHelper.NewMap(adaptedEntries...))
+}
+
+// NewMapEntry creates a Map Entry for the key, value pair.
+func (ah *adaptingHelper) NewMapEntry(key *exprpb.Expr, val *exprpb.Expr, optional bool) *exprpb.Expr_CreateStruct_Entry {
+ return mustAdaptToProtoEntry(
+ ah.modernHelper.NewMapEntry(mustAdaptToExpr(key), mustAdaptToExpr(val), optional))
+}
+
+// NewObject creates a CreateStruct instruction for an object with a given type name and
+// optional set of field initializers.
+func (ah *adaptingHelper) NewObject(typeName string, fieldInits ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr {
+ adaptedEntries := make([]ast.EntryExpr, len(fieldInits))
+ for i, e := range fieldInits {
+ adaptedEntries[i] = mustAdaptToEntryExpr(e)
+ }
+ return mustAdaptToProto(ah.modernHelper.NewStruct(typeName, adaptedEntries...))
+}
+
+// NewObjectFieldInit creates a new Object field initializer from the field name and value.
+func (ah *adaptingHelper) NewObjectFieldInit(field string, init *exprpb.Expr, optional bool) *exprpb.Expr_CreateStruct_Entry {
+ return mustAdaptToProtoEntry(
+ ah.modernHelper.NewStructField(field, mustAdaptToExpr(init), optional))
+}
+
+// Fold creates a fold comprehension instruction.
+//
+// - iterVar is the iteration variable name.
+// - iterRange represents the expression that resolves to a list or map where the elements or
+// keys (respectively) will be iterated over.
+// - accuVar is the accumulation variable name, typically parser.AccumulatorName.
+// - accuInit is the initial expression whose value will be set for the accuVar prior to
+// folding.
+// - condition is the expression to test to determine whether to continue folding.
+// - step is the expression to evaluate at the conclusion of a single fold iteration.
+// - result is the computation to evaluate at the conclusion of the fold.
+//
+// The accuVar should not shadow variable names that you would like to reference within the
+// environment in the step and condition expressions. Presently, the name __result__ is commonly
+// used by built-in macros but this may change in the future.
+func (ah *adaptingHelper) Fold(iterVar string,
+ iterRange *exprpb.Expr,
+ accuVar string,
+ accuInit *exprpb.Expr,
+ condition *exprpb.Expr,
+ step *exprpb.Expr,
+ result *exprpb.Expr) *exprpb.Expr {
+ return mustAdaptToProto(
+ ah.modernHelper.NewComprehension(
+ mustAdaptToExpr(iterRange),
+ iterVar,
+ accuVar,
+ mustAdaptToExpr(accuInit),
+ mustAdaptToExpr(condition),
+ mustAdaptToExpr(step),
+ mustAdaptToExpr(result),
+ ),
+ )
+}
+
+// Ident creates an identifier Expr value.
+func (ah *adaptingHelper) Ident(name string) *exprpb.Expr {
+ return mustAdaptToProto(ah.modernHelper.NewIdent(name))
+}
+
+// AccuIdent returns an accumulator identifier for use with comprehension results.
+func (ah *adaptingHelper) AccuIdent() *exprpb.Expr {
+ return mustAdaptToProto(ah.modernHelper.NewAccuIdent())
+}
+
+// GlobalCall creates a function call Expr value for a global (free) function.
+func (ah *adaptingHelper) GlobalCall(function string, args ...*exprpb.Expr) *exprpb.Expr {
+ return mustAdaptToProto(ah.modernHelper.NewCall(function, mustAdaptToExprs(args)...))
+}
+
+// ReceiverCall creates a function call Expr value for a receiver-style function.
+func (ah *adaptingHelper) ReceiverCall(function string, target *exprpb.Expr, args ...*exprpb.Expr) *exprpb.Expr {
+ return mustAdaptToProto(
+ ah.modernHelper.NewMemberCall(function, mustAdaptToExpr(target), mustAdaptToExprs(args)...))
+}
+
+// PresenceTest creates a Select TestOnly Expr value for modelling has() semantics.
+func (ah *adaptingHelper) PresenceTest(operand *exprpb.Expr, field string) *exprpb.Expr {
+ op := mustAdaptToExpr(operand)
+ return mustAdaptToProto(ah.modernHelper.NewPresenceTest(op, field))
+}
+
+// Select create a field traversal Expr value.
+func (ah *adaptingHelper) Select(operand *exprpb.Expr, field string) *exprpb.Expr {
+ op := mustAdaptToExpr(operand)
+ return mustAdaptToProto(ah.modernHelper.NewSelect(op, field))
+}
+
+// OffsetLocation returns the Location of the expression identifier.
+func (ah *adaptingHelper) OffsetLocation(exprID int64) common.Location {
+ return ah.modernHelper.OffsetLocation(exprID)
+}
+
+// NewError associates an error message with a given expression id.
+func (ah *adaptingHelper) NewError(exprID int64, message string) *Error {
+ return ah.modernHelper.NewError(exprID, message)
+}
+
+func mustAdaptToExprs(exprs []*exprpb.Expr) []ast.Expr {
+ adapted := make([]ast.Expr, len(exprs))
+ for i, e := range exprs {
+ adapted[i] = mustAdaptToExpr(e)
+ }
+ return adapted
+}
+
+func mustAdaptToExpr(e *exprpb.Expr) ast.Expr {
+ out, _ := adaptToExpr(e)
+ return out
+}
+
+func adaptToExpr(e *exprpb.Expr) (ast.Expr, *Error) {
+ if e == nil {
+ return nil, nil
+ }
+ out, err := ast.ProtoToExpr(e)
+ if err != nil {
+ return nil, wrapErr(e.GetId(), "proto conversion failure", err)
+ }
+ return out, nil
+}
+
+func mustAdaptToEntryExpr(e *exprpb.Expr_CreateStruct_Entry) ast.EntryExpr {
+ out, _ := ast.ProtoToEntryExpr(e)
+ return out
+}
+
+func mustAdaptToProto(e ast.Expr) *exprpb.Expr {
+ out, _ := adaptToProto(e)
+ return out
+}
+
+func adaptToProto(e ast.Expr) (*exprpb.Expr, *Error) {
+ if e == nil {
+ return nil, nil
+ }
+ out, err := ast.ExprToProto(e)
+ if err != nil {
+ return nil, wrapErr(e.ID(), "expr conversion failure", err)
+ }
+ return out, nil
+}
+
+func mustAdaptToProtoEntry(e ast.EntryExpr) *exprpb.Expr_CreateStruct_Entry {
+ out, _ := ast.EntryExprToProto(e)
+ return out
+}
+
+func toParserHelper(meh MacroExprHelper) (parser.ExprHelper, *Error) {
+ ah, ok := meh.(*adaptingHelper)
+ if !ok {
+ return nil, common.NewError(0,
+ fmt.Sprintf("unsupported macro helper: %v (%T)", meh, meh),
+ common.NoLocation)
+ }
+ return ah.modernHelper, nil
+}
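
For context on how this vendored surface is used: `GlobalMacro` with a `MacroFactory` is the non-deprecated path (the `New*Macro` constructors above merely adapt legacy expanders onto it). A minimal sketch of a parse-time `square(x)` macro, assuming the cel-go APIs vendored here; the macro name and expansion are illustrative, not part of the library:

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/common"
	"github.com/google/cel-go/common/ast"
	"github.com/google/cel-go/common/operators"
)

func main() {
	// square(x) expands to x * x at parse time. The second use of the argument
	// is Copy'd so the two subtrees receive distinct expression ids.
	squareMacro := cel.GlobalMacro("square", 1,
		func(eh cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) {
			return eh.NewCall(operators.Multiply, args[0], eh.Copy(args[0])), nil
		})

	env, err := cel.NewEnv(cel.Macros(squareMacro))
	if err != nil {
		panic(err)
	}
	checked, iss := env.Compile("square(7)")
	if iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(checked)
	if err != nil {
		panic(err)
	}
	out, _, err := prg.Eval(map[string]any{})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // 49
}
```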
diff --git a/vendor/github.com/google/cel-go/cel/optimizer.go b/vendor/github.com/google/cel-go/cel/optimizer.go
new file mode 100644
index 000000000..c149abb70
--- /dev/null
+++ b/vendor/github.com/google/cel-go/cel/optimizer.go
@@ -0,0 +1,535 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cel
+
+import (
+ "sort"
+
+ "github.com/google/cel-go/common"
+ "github.com/google/cel-go/common/ast"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+)
+
+// StaticOptimizer contains a sequence of ASTOptimizer instances which will be applied in order.
+//
+// The static optimizer normalizes expression ids and re-runs type-checking between optimization
+// passes to ensure that the final optimized output is a valid expression with metadata consistent
+// with what would have been generated from a parsed and checked expression.
+//
+// Note: source position information is best-effort and likely wrong, but optimized expressions
+// should be suitable for calls to parser.Unparse.
+type StaticOptimizer struct {
+ optimizers []ASTOptimizer
+}
+
+// NewStaticOptimizer creates a StaticOptimizer with a sequence of ASTOptimizer's to be applied
+// to a checked expression.
+func NewStaticOptimizer(optimizers ...ASTOptimizer) *StaticOptimizer {
+ return &StaticOptimizer{
+ optimizers: optimizers,
+ }
+}
+
+// Optimize applies a sequence of optimizations to an Ast within a given environment.
+//
+// If issues are encountered, the Issues.Err() return value will be non-nil.
+func (opt *StaticOptimizer) Optimize(env *Env, a *Ast) (*Ast, *Issues) {
+ // Make a copy of the AST to be optimized.
+ optimized := ast.Copy(a.impl)
+ ids := newIDGenerator(ast.MaxID(a.impl))
+
+ // Create the optimizer context, could be pooled in the future.
+ issues := NewIssues(common.NewErrors(a.Source()))
+ baseFac := ast.NewExprFactory()
+ exprFac := &optimizerExprFactory{
+ idGenerator: ids,
+ fac: baseFac,
+ sourceInfo: optimized.SourceInfo(),
+ }
+ ctx := &OptimizerContext{
+ optimizerExprFactory: exprFac,
+ Env: env,
+ Issues: issues,
+ }
+
+ // Apply the optimizations sequentially.
+ for _, o := range opt.optimizers {
+ optimized = o.Optimize(ctx, optimized)
+ if issues.Err() != nil {
+ return nil, issues
+ }
+ // Normalize expression id metadata including coordination with macro call metadata.
+ freshIDGen := newIDGenerator(0)
+ info := optimized.SourceInfo()
+ expr := optimized.Expr()
+ normalizeIDs(freshIDGen.renumberStable, expr, info)
+ cleanupMacroRefs(expr, info)
+
+ // Recheck the updated expression for any possible type-agreement or validation errors.
+ parsed := &Ast{
+ source: a.Source(),
+ impl: ast.NewAST(expr, info)}
+ checked, iss := ctx.Check(parsed)
+ if iss.Err() != nil {
+ return nil, iss
+ }
+ optimized = checked.impl
+ }
+
+ // Return the optimized result.
+ return &Ast{
+ source: a.Source(),
+ impl: optimized,
+ }, nil
+}
+
+// normalizeIDs ensures that the metadata present with an AST is reset in a manner such
+// that the ids within the expression correspond to the ids within macros.
+func normalizeIDs(idGen ast.IDGenerator, optimized ast.Expr, info *ast.SourceInfo) {
+ optimized.RenumberIDs(idGen)
+ if len(info.MacroCalls()) == 0 {
+ return
+ }
+
+ // Sort the macro ids to make sure that the renumbering of macro-specific variables
+ // is stable across normalization calls.
+ sortedMacroIDs := []int64{}
+ for id := range info.MacroCalls() {
+ sortedMacroIDs = append(sortedMacroIDs, id)
+ }
+ sort.Slice(sortedMacroIDs, func(i, j int) bool { return sortedMacroIDs[i] < sortedMacroIDs[j] })
+
+ // First, update the macro call ids themselves.
+ callIDMap := map[int64]int64{}
+ for _, id := range sortedMacroIDs {
+ callIDMap[id] = idGen(id)
+ }
+ // Then update the macro call definitions which refer to these ids, but
+ // ensure that the updates don't collide and remove macro entries which haven't
+ // been visited / updated yet.
+ type macroUpdate struct {
+ id int64
+ call ast.Expr
+ }
+ macroUpdates := []macroUpdate{}
+ for _, oldID := range sortedMacroIDs {
+ newID := callIDMap[oldID]
+ call, found := info.GetMacroCall(oldID)
+ if !found {
+ continue
+ }
+ call.RenumberIDs(idGen)
+ macroUpdates = append(macroUpdates, macroUpdate{id: newID, call: call})
+ info.ClearMacroCall(oldID)
+ }
+ for _, u := range macroUpdates {
+ info.SetMacroCall(u.id, u.call)
+ }
+}
+
+func cleanupMacroRefs(expr ast.Expr, info *ast.SourceInfo) {
+ if len(info.MacroCalls()) == 0 {
+ return
+ }
+
+ // Sanitize the macro call references once the optimized expression has been computed
+ // and the ids normalized between the expression and the macros.
+ exprRefMap := make(map[int64]struct{})
+ ast.PostOrderVisit(expr, ast.NewExprVisitor(func(e ast.Expr) {
+ if e.ID() == 0 {
+ return
+ }
+ exprRefMap[e.ID()] = struct{}{}
+ }))
+ // Update the macro call id references to ensure that macro pointers are
+ // updated consistently across macros.
+ for _, call := range info.MacroCalls() {
+ ast.PostOrderVisit(call, ast.NewExprVisitor(func(e ast.Expr) {
+ if e.ID() == 0 {
+ return
+ }
+ exprRefMap[e.ID()] = struct{}{}
+ }))
+ }
+ for id := range info.MacroCalls() {
+ if _, found := exprRefMap[id]; !found {
+ info.ClearMacroCall(id)
+ }
+ }
+}
+
+// newIDGenerator ensures that new ids are only created the first time they are encountered.
+func newIDGenerator(seed int64) *idGenerator {
+ return &idGenerator{
+ idMap: make(map[int64]int64),
+ seed: seed,
+ }
+}
+
+type idGenerator struct {
+ idMap map[int64]int64
+ seed int64
+}
+
+func (gen *idGenerator) nextID() int64 {
+ gen.seed++
+ return gen.seed
+}
+
+func (gen *idGenerator) renumberStable(id int64) int64 {
+ if id == 0 {
+ return 0
+ }
+ if newID, found := gen.idMap[id]; found {
+ return newID
+ }
+ nextID := gen.nextID()
+ gen.idMap[id] = nextID
+ return nextID
+}
+
+// OptimizerContext embeds Env and Issues instances to make it easy to type-check and evaluate
+// subexpressions and report any errors encountered along the way. The context also embeds the
+// optimizerExprFactory which can be used to generate new sub-expressions with expression ids
+// consistent with the expectations of a parsed expression.
+type OptimizerContext struct {
+ *Env
+ *optimizerExprFactory
+ *Issues
+}
+
+// ExtendEnv augments the context's environment with the additional options.
+func (opt *OptimizerContext) ExtendEnv(opts ...EnvOption) error {
+ e, err := opt.Env.Extend(opts...)
+ if err != nil {
+ return err
+ }
+ opt.Env = e
+ return nil
+}
+
+// ASTOptimizer applies an optimization over an AST and returns the optimized result.
+type ASTOptimizer interface {
+ // Optimize optimizes a type-checked AST within an Environment and accumulates any issues.
+ Optimize(*OptimizerContext, *ast.AST) *ast.AST
+}
+
+type optimizerExprFactory struct {
+ *idGenerator
+ fac ast.ExprFactory
+ sourceInfo *ast.SourceInfo
+}
+
+// NewAST creates an AST from the current expression using the tracked source info which
+// is modified and managed by the OptimizerContext.
+func (opt *optimizerExprFactory) NewAST(expr ast.Expr) *ast.AST {
+ return ast.NewAST(expr, opt.sourceInfo)
+}
+
+// CopyAST creates a renumbered copy of `Expr` and `SourceInfo` values of the input AST, where the
+// renumbering uses the same scheme as the core optimizer logic ensuring there are no collisions
+// between copies.
+//
+// Use this method before attempting to merge the expression from AST into another.
+func (opt *optimizerExprFactory) CopyAST(a *ast.AST) (ast.Expr, *ast.SourceInfo) {
+ idGen := newIDGenerator(opt.nextID())
+ defer func() { opt.seed = idGen.nextID() }()
+ copyExpr := opt.fac.CopyExpr(a.Expr())
+ copyInfo := ast.CopySourceInfo(a.SourceInfo())
+ normalizeIDs(idGen.renumberStable, copyExpr, copyInfo)
+ return copyExpr, copyInfo
+}
+
+// CopyASTAndMetadata copies the input AST and propagates the macro metadata into the AST being
+// optimized.
+func (opt *optimizerExprFactory) CopyASTAndMetadata(a *ast.AST) ast.Expr {
+ copyExpr, copyInfo := opt.CopyAST(a)
+ for macroID, call := range copyInfo.MacroCalls() {
+ opt.SetMacroCall(macroID, call)
+ }
+ return copyExpr
+}
+
+// ClearMacroCall clears the macro at the given expression id.
+func (opt *optimizerExprFactory) ClearMacroCall(id int64) {
+ opt.sourceInfo.ClearMacroCall(id)
+}
+
+// SetMacroCall sets the macro call metadata for the given macro id within the tracked source info
+// metadata.
+func (opt *optimizerExprFactory) SetMacroCall(id int64, expr ast.Expr) {
+ opt.sourceInfo.SetMacroCall(id, expr)
+}
+
+// MacroCalls returns the map of macro calls currently in the context.
+func (opt *optimizerExprFactory) MacroCalls() map[int64]ast.Expr {
+ return opt.sourceInfo.MacroCalls()
+}
+
+// NewBindMacro creates an AST expression representing the expanded bind() macro, and a macro expression
+// representing the unexpanded call signature to be inserted into the source info macro call metadata.
+func (opt *optimizerExprFactory) NewBindMacro(macroID int64, varName string, varInit, remaining ast.Expr) (astExpr, macroExpr ast.Expr) {
+ varID := opt.nextID()
+ remainingID := opt.nextID()
+ remaining = opt.fac.CopyExpr(remaining)
+ remaining.RenumberIDs(func(id int64) int64 {
+ if id == macroID {
+ return remainingID
+ }
+ return id
+ })
+ if call, exists := opt.sourceInfo.GetMacroCall(macroID); exists {
+ opt.SetMacroCall(remainingID, opt.fac.CopyExpr(call))
+ }
+
+ astExpr = opt.fac.NewComprehension(macroID,
+ opt.fac.NewList(opt.nextID(), []ast.Expr{}, []int32{}),
+ "#unused",
+ varName,
+ opt.fac.CopyExpr(varInit),
+ opt.fac.NewLiteral(opt.nextID(), types.False),
+ opt.fac.NewIdent(varID, varName),
+ remaining)
+
+ macroExpr = opt.fac.NewMemberCall(0, "bind",
+ opt.fac.NewIdent(opt.nextID(), "cel"),
+ opt.fac.NewIdent(varID, varName),
+ opt.fac.CopyExpr(varInit),
+ opt.fac.CopyExpr(remaining))
+ opt.sanitizeMacro(macroID, macroExpr)
+ return
+}
+
+// NewCall creates a global function call invocation expression.
+//
+// Example:
+//
+// countByField(list, fieldName)
+// - function: countByField
+// - args: [list, fieldName]
+func (opt *optimizerExprFactory) NewCall(function string, args ...ast.Expr) ast.Expr {
+ return opt.fac.NewCall(opt.nextID(), function, args...)
+}
+
+// NewMemberCall creates a member function call invocation expression where 'target' is the receiver of the call.
+//
+// Example:
+//
+// list.countByField(fieldName)
+// - function: countByField
+// - target: list
+// - args: [fieldName]
+func (opt *optimizerExprFactory) NewMemberCall(function string, target ast.Expr, args ...ast.Expr) ast.Expr {
+ return opt.fac.NewMemberCall(opt.nextID(), function, target, args...)
+}
+
+// NewIdent creates a new identifier expression.
+//
+// Examples:
+//
+// - simple_var_name
+// - qualified.subpackage.var_name
+func (opt *optimizerExprFactory) NewIdent(name string) ast.Expr {
+ return opt.fac.NewIdent(opt.nextID(), name)
+}
+
+// NewLiteral creates a new literal expression value.
+//
+// The range of valid values for a literal generated during optimization is different than for expressions
+// generated via parsing / type-checking, as the ref.Val may be _any_ CEL value so long as the value can
+// be converted back to a literal-like form.
+func (opt *optimizerExprFactory) NewLiteral(value ref.Val) ast.Expr {
+ return opt.fac.NewLiteral(opt.nextID(), value)
+}
+
+// NewList creates a list expression with a set of optional indices.
+//
+// Examples:
+//
+// [a, b]
+// - elems: [a, b]
+// - optIndices: []
+//
+// [a, ?b, ?c]
+// - elems: [a, b, c]
+// - optIndices: [1, 2]
+func (opt *optimizerExprFactory) NewList(elems []ast.Expr, optIndices []int32) ast.Expr {
+ return opt.fac.NewList(opt.nextID(), elems, optIndices)
+}
+
+// NewMap creates a map from a set of entry expressions which contain a key and value expression.
+func (opt *optimizerExprFactory) NewMap(entries []ast.EntryExpr) ast.Expr {
+ return opt.fac.NewMap(opt.nextID(), entries)
+}
+
+// NewMapEntry creates a map entry with a key and value expression and a flag to indicate whether the
+// entry is optional.
+//
+// Examples:
+//
+// {a: b}
+// - key: a
+// - value: b
+// - optional: false
+//
+// {?a: ?b}
+// - key: a
+// - value: b
+// - optional: true
+func (opt *optimizerExprFactory) NewMapEntry(key, value ast.Expr, isOptional bool) ast.EntryExpr {
+ return opt.fac.NewMapEntry(opt.nextID(), key, value, isOptional)
+}
+
+// NewHasMacro generates a test-only select expression to be included within an AST and an unexpanded
+// has() macro call signature to be inserted into the source info macro call metadata.
+func (opt *optimizerExprFactory) NewHasMacro(macroID int64, s ast.Expr) (astExpr, macroExpr ast.Expr) {
+ sel := s.AsSelect()
+ astExpr = opt.fac.NewPresenceTest(macroID, sel.Operand(), sel.FieldName())
+ macroExpr = opt.fac.NewCall(0, "has",
+ opt.NewSelect(opt.fac.CopyExpr(sel.Operand()), sel.FieldName()))
+ opt.sanitizeMacro(macroID, macroExpr)
+ return
+}
+
+// NewSelect creates a select expression where a field value is selected from an operand.
+//
+// Example:
+//
+// msg.field_name
+// - operand: msg
+// - field: field_name
+func (opt *optimizerExprFactory) NewSelect(operand ast.Expr, field string) ast.Expr {
+ return opt.fac.NewSelect(opt.nextID(), operand, field)
+}
+
+// NewStruct creates a new typed struct value with a set of field initializations.
+//
+// Example:
+//
+// pkg.TypeName{field: value}
+// - typeName: pkg.TypeName
+// - fields: [{field: value}]
+func (opt *optimizerExprFactory) NewStruct(typeName string, fields []ast.EntryExpr) ast.Expr {
+ return opt.fac.NewStruct(opt.nextID(), typeName, fields)
+}
+
+// NewStructField creates a struct field initialization.
+//
+// Examples:
+//
+// {count: 3u}
+// - field: count
+// - value: 3u
+// - optional: false
+//
+// {?count: x}
+// - field: count
+// - value: x
+// - optional: true
+func (opt *optimizerExprFactory) NewStructField(field string, value ast.Expr, isOptional bool) ast.EntryExpr {
+ return opt.fac.NewStructField(opt.nextID(), field, value, isOptional)
+}
+
+// UpdateExpr updates the target expression with the updated content while preserving macro metadata.
+//
+// There are four scenarios during the update to consider:
+// 1. target is not macro, updated is not macro
+// 2. target is macro, updated is not macro
+// 3. target is macro, updated is macro
+// 4. target is not macro, updated is macro
+//
+// When the target is a macro already, it may either be updated to a new macro function
+// body if the update is also a macro, or it may be removed altogether if the update is
+// not a macro.
+//
+// When the update is a macro, then the target references within other macros must be
+// updated to point to the new updated macro. Otherwise, other macros which pointed to
+// the target body must be replaced with copies of the updated expression body.
+func (opt *optimizerExprFactory) UpdateExpr(target, updated ast.Expr) {
+ // Update the expression
+ target.SetKindCase(updated)
+
+ // Early return if there are no macros present, as the source info reflects the
+ // macro set from the target and updated expressions.
+ if len(opt.sourceInfo.MacroCalls()) == 0 {
+ return
+ }
+ // Determine whether the target expression was a macro.
+ _, targetIsMacro := opt.sourceInfo.GetMacroCall(target.ID())
+
+ // Determine whether the updated expression was a macro.
+ updatedMacro, updatedIsMacro := opt.sourceInfo.GetMacroCall(updated.ID())
+
+ if updatedIsMacro {
+ // If the updated call was a macro, then updated id maps to target id,
+ // and the updated macro moves into the target id slot.
+ opt.sourceInfo.ClearMacroCall(updated.ID())
+ opt.sourceInfo.SetMacroCall(target.ID(), updatedMacro)
+ } else if targetIsMacro {
+ // Otherwise if the target expr was a macro, but is no longer, clear
+ // the macro reference.
+ opt.sourceInfo.ClearMacroCall(target.ID())
+ }
+
+ // Punch holes in the updated value where macros references exist.
+ macroExpr := opt.fac.CopyExpr(target)
+ macroRefVisitor := ast.NewExprVisitor(func(e ast.Expr) {
+ if _, exists := opt.sourceInfo.GetMacroCall(e.ID()); exists {
+ e.SetKindCase(nil)
+ }
+ })
+ ast.PostOrderVisit(macroExpr, macroRefVisitor)
+
+ // Update any references to the expression within a macro
+ macroVisitor := ast.NewExprVisitor(func(call ast.Expr) {
+ // Update the target expression to point to the macro expression which
+ // will be empty if the updated expression was a macro.
+ if call.ID() == target.ID() {
+ call.SetKindCase(opt.fac.CopyExpr(macroExpr))
+ }
+ // Update the macro call expression if it refers to the updated expression
+ // id which has since been remapped to the target id.
+ if call.ID() == updated.ID() {
+ // Either ensure the expression is a macro reference or is populated with
+ // the relevant sub-expression if the updated expr was not a macro.
+ if updatedIsMacro {
+ call.SetKindCase(nil)
+ } else {
+ call.SetKindCase(opt.fac.CopyExpr(macroExpr))
+ }
+ // Since SetKindCase does not renumber the id, ensure the references to
+ // the old 'updated' id are mapped to the target id.
+ call.RenumberIDs(func(id int64) int64 {
+ if id == updated.ID() {
+ return target.ID()
+ }
+ return id
+ })
+ }
+ })
+ for _, call := range opt.sourceInfo.MacroCalls() {
+ ast.PostOrderVisit(call, macroVisitor)
+ }
+}
+
+func (opt *optimizerExprFactory) sanitizeMacro(macroID int64, macroExpr ast.Expr) {
+ macroRefVisitor := ast.NewExprVisitor(func(e ast.Expr) {
+ if _, exists := opt.sourceInfo.GetMacroCall(e.ID()); exists && e.ID() != macroID {
+ e.SetKindCase(nil)
+ }
+ })
+ ast.PostOrderVisit(macroExpr, macroRefVisitor)
+}
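
A sketch of how this StaticOptimizer is driven in practice, paired with the constant-folding optimizer that ships in the same package (assuming the vendored version exposes `cel.NewConstantFoldingOptimizer`; the expression is illustrative):

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
)

func main() {
	env, err := cel.NewEnv()
	if err != nil {
		panic(err)
	}
	checked, iss := env.Compile(`[1, 2, 3].map(x, x * 2)[0] == 2`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	folder, err := cel.NewConstantFoldingOptimizer()
	if err != nil {
		panic(err)
	}
	// Optimize renumbers ids and re-checks the AST after each pass, so the
	// result remains a valid, type-checked Ast.
	optimized, iss := cel.NewStaticOptimizer(folder).Optimize(env, checked)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	unparsed, err := cel.AstToString(optimized)
	if err != nil {
		panic(err)
	}
	fmt.Println(unparsed) // the constant expression folds down to: true
}
```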
diff --git a/vendor/github.com/google/cel-go/cel/options.go b/vendor/github.com/google/cel-go/cel/options.go
new file mode 100644
index 000000000..69c694263
--- /dev/null
+++ b/vendor/github.com/google/cel-go/cel/options.go
@@ -0,0 +1,667 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cel
+
+import (
+ "fmt"
+
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protodesc"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+ "google.golang.org/protobuf/types/dynamicpb"
+
+ "github.com/google/cel-go/checker"
+ "github.com/google/cel-go/common/containers"
+ "github.com/google/cel-go/common/functions"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/pb"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/interpreter"
+ "github.com/google/cel-go/parser"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+ descpb "google.golang.org/protobuf/types/descriptorpb"
+)
+
+// These constants beginning with "Feature" enable optional behavior in
+// the library. See the documentation for each constant to see its
+// effects, compatibility restrictions, and standard conformance.
+const (
+ _ = iota
+
+ // Enable the tracking of function call expressions replaced by macros.
+ featureEnableMacroCallTracking
+
+ // Enable the use of cross-type numeric comparisons at the type-checker.
+ featureCrossTypeNumericComparisons
+
+ // Enable eager validation of declarations to ensure that Env values created
+ // with `Extend` inherit a validated list of declarations from the parent Env.
+ featureEagerlyValidateDeclarations
+
+ // Enable the use of the default UTC timezone when a timezone is not specified
+ // on a CEL timestamp operation. This fixes the scenario where the input time
+ // is not already in UTC.
+ featureDefaultUTCTimeZone
+
+ // Enable the serialization of logical operator ASTs as variadic calls, thus
+ // compressing the logic graph to a single call when multiple like-operator
+ // expressions occur: e.g. a && b && c && d -> call(_&&_, [a, b, c, d])
+ featureVariadicLogicalASTs
+
+ // Enable error generation when a presence test or optional field selection is
+ // performed on a primitive type.
+ featureEnableErrorOnBadPresenceTest
+)
+
+// EnvOption is a functional interface for configuring the environment.
+type EnvOption func(e *Env) (*Env, error)
+
+// ClearMacros options clears all parser macros.
+//
+// Clearing macros will ensure CEL expressions can only contain linear evaluation paths, as
+// comprehensions such as `all` and `exists` are enabled only via macros.
+func ClearMacros() EnvOption {
+ return func(e *Env) (*Env, error) {
+ e.macros = NoMacros
+ return e, nil
+ }
+}
+
+// CustomTypeAdapter swaps the default types.Adapter implementation with a custom one.
+//
+// Note: This option must be specified before the Types and TypeDescs options when used together.
+func CustomTypeAdapter(adapter types.Adapter) EnvOption {
+ return func(e *Env) (*Env, error) {
+ e.adapter = adapter
+ return e, nil
+ }
+}
+
+// CustomTypeProvider replaces the types.Provider implementation with a custom one.
+//
+// The `provider` variable type may either be types.Provider or ref.TypeProvider (deprecated)
+//
+// Note: This option must be specified before the Types and TypeDescs options when used together.
+func CustomTypeProvider(provider any) EnvOption {
+ return func(e *Env) (*Env, error) {
+ var err error
+ e.provider, err = maybeInteropProvider(provider)
+ return e, err
+ }
+}
+
+// Declarations option extends the declaration set configured in the environment.
+//
+// Note: Declarations will by default be appended to the pre-existing declaration set configured
+// for the environment. The NewEnv call builds on top of the standard CEL declarations. For a
+// purely custom set of declarations use NewCustomEnv.
+func Declarations(decls ...*exprpb.Decl) EnvOption {
+ declOpts := []EnvOption{}
+ var err error
+ var opt EnvOption
+ // Convert the declarations to `EnvOption` values ahead of time.
+ // Surface any errors in conversion when the options are applied.
+ for _, d := range decls {
+ opt, err = ExprDeclToDeclaration(d)
+ if err != nil {
+ break
+ }
+ declOpts = append(declOpts, opt)
+ }
+ return func(e *Env) (*Env, error) {
+ if err != nil {
+ return nil, err
+ }
+ for _, o := range declOpts {
+ e, err = o(e)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return e, nil
+ }
+}
+
+// EagerlyValidateDeclarations ensures that any collisions between configured declarations are caught
+// at the time of the `NewEnv` call.
+//
+// Eagerly validating declarations is also useful for bootstrapping a base `cel.Env` value.
+// Calls to base `Env.Extend()` will be significantly faster when declarations are eagerly validated
+// as declarations will be collision-checked at most once and only incrementally by way of `Extend`
+//
+// Disabled by default as not all environments are used for type-checking.
+func EagerlyValidateDeclarations(enabled bool) EnvOption {
+ return features(featureEagerlyValidateDeclarations, enabled)
+}
+
+// HomogeneousAggregateLiterals disables mixed type list and map literal values.
+//
+// Note, it is still possible to have heterogeneous aggregates when provided as variables to the
+// expression, as well as via conversion of well-known dynamic types, or with unchecked
+// expressions.
+func HomogeneousAggregateLiterals() EnvOption {
+ return ASTValidators(ValidateHomogeneousAggregateLiterals())
+}
+
+// variadicLogicalOperatorASTs flatten like-operator chained logical expressions into a single
+// variadic call with N-terms. This behavior is useful when serializing to a protocol buffer as
+// it will reduce the number of recursive calls needed to deserialize the AST later.
+//
+// For example, given the following expression the call graph will be rendered accordingly:
+//
+// expression: a && b && c && (d || e)
+// ast: call(_&&_, [a, b, c, call(_||_, [d, e])])
+func variadicLogicalOperatorASTs() EnvOption {
+ return features(featureVariadicLogicalASTs, true)
+}
+
+// Macros option extends the macro set configured in the environment.
+//
+// Note: This option must be specified after ClearMacros if used together.
+func Macros(macros ...Macro) EnvOption {
+ return func(e *Env) (*Env, error) {
+ e.macros = append(e.macros, macros...)
+ return e, nil
+ }
+}
+
+// Container sets the container for resolving variable names. Defaults to an empty container.
+//
+// If all references within an expression are relative to a protocol buffer package, then
+// specifying a container of `google.type` would make it possible to write expressions such as
+// `Expr{expression: 'a < b'}` instead of having to write `google.type.Expr{...}`.
+func Container(name string) EnvOption {
+ return func(e *Env) (*Env, error) {
+ cont, err := e.Container.Extend(containers.Name(name))
+ if err != nil {
+ return nil, err
+ }
+ e.Container = cont
+ return e, nil
+ }
+}
+
+// Abbrevs configures a set of simple names as abbreviations for fully-qualified names.
+//
+// An abbreviation (abbrev for short) is a simple name that expands to a fully-qualified name.
+// Abbreviations can be useful when working with variables, functions, and especially types from
+// multiple namespaces:
+//
+// // CEL object construction
+// qual.pkg.version.ObjTypeName{
+// field: alt.container.ver.FieldTypeName{value: ...}
+// }
+//
+// Only one of the qualified names above may be used as the CEL container, so at least one of these
+// references must be a long qualified name within an otherwise short CEL program. Using the
+// following abbreviations, the program becomes much simpler:
+//
+// // CEL Go option
+// Abbrevs("qual.pkg.version.ObjTypeName", "alt.container.ver.FieldTypeName")
+// // Simplified Object construction
+// ObjTypeName{field: FieldTypeName{value: ...}}
+//
+// There are a few rules for the qualified names and the simple abbreviations generated from them:
+// - Qualified names must be dot-delimited, e.g. `package.subpkg.name`.
+// - The last element in the qualified name is the abbreviation.
+// - Abbreviations must not collide with each other.
+// - The abbreviation must not collide with unqualified names in use.
+//
+// Abbreviations are distinct from container-based references in the following important ways:
+// - Abbreviations must expand to a fully-qualified name.
+// - Expanded abbreviations do not participate in namespace resolution.
+// - Abbreviation expansion is done instead of the container search for a matching identifier.
+// - Containers follow C++ namespace resolution rules with searches from the most qualified name
+//   to the least qualified name.
+// - Container references within the CEL program may be relative, and are resolved to fully
+//   qualified names at either type-check time or program plan time, whichever comes first.
+//
+// If there is ever a case where an identifier could be in both the container and as an
+// abbreviation, the abbreviation wins as this will ensure that the meaning of a program is
+// preserved between compilations even as the container evolves.
+func Abbrevs(qualifiedNames ...string) EnvOption {
+ return func(e *Env) (*Env, error) {
+ cont, err := e.Container.Extend(containers.Abbrevs(qualifiedNames...))
+ if err != nil {
+ return nil, err
+ }
+ e.Container = cont
+ return e, nil
+ }
+}
+
+// customTypeRegistry is an internal-only interface containing the minimum methods required to support
+// custom types. It is a subset of methods from ref.TypeRegistry.
+type customTypeRegistry interface {
+ RegisterDescriptor(protoreflect.FileDescriptor) error
+ RegisterType(...ref.Type) error
+}
+
+// Types adds one or more type declarations to the environment, allowing for construction of
+// type-literals whose definitions are included in the common expression built-in set.
+//
+// The input types may either be instances of `proto.Message` or `ref.Type`. Any other type
+// provided to this option will result in an error.
+//
+// Well-known protobuf types within the `google.protobuf.*` package are included in the standard
+// environment by default.
+//
+// Note: This option must be specified after the CustomTypeProvider option when used together.
+func Types(addTypes ...any) EnvOption {
+ return func(e *Env) (*Env, error) {
+ reg, isReg := e.provider.(customTypeRegistry)
+ if !isReg {
+ return nil, fmt.Errorf("custom types not supported by provider: %T", e.provider)
+ }
+ for _, t := range addTypes {
+ switch v := t.(type) {
+ case proto.Message:
+ fdMap := pb.CollectFileDescriptorSet(v)
+ for _, fd := range fdMap {
+ err := reg.RegisterDescriptor(fd)
+ if err != nil {
+ return nil, err
+ }
+ }
+ case ref.Type:
+ err := reg.RegisterType(v)
+ if err != nil {
+ return nil, err
+ }
+ default:
+ return nil, fmt.Errorf("unsupported type: %T", t)
+ }
+ }
+ return e, nil
+ }
+}
+
+// TypeDescs adds type declarations from any protoreflect.FileDescriptor, protoregistry.Files,
+// google.protobuf.FileDescriptorProto or google.protobuf.FileDescriptorSet provided.
+//
+// Note that messages instantiated from these descriptors will be *dynamicpb.Message values
+// rather than the concrete message type.
+//
+// TypeDescs are hermetic to a single Env object, but may be copied to other Env values via
+// extension or by re-using the same EnvOption with another NewEnv() call.
+func TypeDescs(descs ...any) EnvOption {
+ return func(e *Env) (*Env, error) {
+ reg, isReg := e.provider.(customTypeRegistry)
+ if !isReg {
+ return nil, fmt.Errorf("custom types not supported by provider: %T", e.provider)
+ }
+ // Scan the input descriptors for FileDescriptorProto messages and accumulate them into a
+ // synthetic FileDescriptorSet as the FileDescriptorProto messages may refer to each other
+ // and will not resolve properly unless they are part of the same set.
+ var fds *descpb.FileDescriptorSet
+ for _, d := range descs {
+ switch f := d.(type) {
+ case *descpb.FileDescriptorProto:
+ if fds == nil {
+ fds = &descpb.FileDescriptorSet{
+ File: []*descpb.FileDescriptorProto{},
+ }
+ }
+ fds.File = append(fds.File, f)
+ }
+ }
+ if fds != nil {
+ if err := registerFileSet(reg, fds); err != nil {
+ return nil, err
+ }
+ }
+ for _, d := range descs {
+ switch f := d.(type) {
+ case *protoregistry.Files:
+ if err := registerFiles(reg, f); err != nil {
+ return nil, err
+ }
+ case protoreflect.FileDescriptor:
+ if err := reg.RegisterDescriptor(f); err != nil {
+ return nil, err
+ }
+ case *descpb.FileDescriptorSet:
+ if err := registerFileSet(reg, f); err != nil {
+ return nil, err
+ }
+ case *descpb.FileDescriptorProto:
+ // skip, handled as a synthetic file descriptor set.
+ default:
+ return nil, fmt.Errorf("unsupported type descriptor: %T", d)
+ }
+ }
+ return e, nil
+ }
+}
+
+func registerFileSet(reg customTypeRegistry, fileSet *descpb.FileDescriptorSet) error {
+ files, err := protodesc.NewFiles(fileSet)
+ if err != nil {
+ return fmt.Errorf("protodesc.NewFiles(%v) failed: %v", fileSet, err)
+ }
+ return registerFiles(reg, files)
+}
+
+func registerFiles(reg customTypeRegistry, files *protoregistry.Files) error {
+ var err error
+ files.RangeFiles(func(fd protoreflect.FileDescriptor) bool {
+ err = reg.RegisterDescriptor(fd)
+ return err == nil
+ })
+ return err
+}
+
+// ProgramOption is a functional interface for configuring evaluation bindings and behaviors.
+type ProgramOption func(p *prog) (*prog, error)
+
+// CustomDecorator appends an InterpretableDecorator to the program.
+//
+// InterpretableDecorators can be used to inspect, alter, or replace the Program plan.
+func CustomDecorator(dec interpreter.InterpretableDecorator) ProgramOption {
+ return func(p *prog) (*prog, error) {
+ p.decorators = append(p.decorators, dec)
+ return p, nil
+ }
+}
+
+// Functions adds function overloads that extend or override the set of CEL built-ins.
+//
+// Deprecated: use Function() instead to declare the function, its overload signatures,
+// and the overload implementations.
+func Functions(funcs ...*functions.Overload) ProgramOption {
+ return func(p *prog) (*prog, error) {
+ if err := p.dispatcher.Add(funcs...); err != nil {
+ return nil, err
+ }
+ return p, nil
+ }
+}
+
+// Globals sets the global variable values for a given program. These values may be shadowed by
+// variables with the same name provided to the Eval() call. If Globals is used in a Library with
+// a Lib EnvOption, vars may shadow variables provided by previously added libraries.
+//
+// The vars value may either be an `interpreter.Activation` instance or a `map[string]any`.
+func Globals(vars any) ProgramOption {
+ return func(p *prog) (*prog, error) {
+ defaultVars, err := interpreter.NewActivation(vars)
+ if err != nil {
+ return nil, err
+ }
+ if p.defaultVars != nil {
+ defaultVars = interpreter.NewHierarchicalActivation(p.defaultVars, defaultVars)
+ }
+ p.defaultVars = defaultVars
+ return p, nil
+ }
+}
+
+// OptimizeRegex provides a way to replace the InterpretableCall for regex functions. This can be used
+// to compile regex string constants at program creation time, report any errors, and then use the
+// compiled regex for all regex function invocations.
+func OptimizeRegex(regexOptimizations ...*interpreter.RegexOptimization) ProgramOption {
+ return func(p *prog) (*prog, error) {
+ p.regexOptimizations = append(p.regexOptimizations, regexOptimizations...)
+ return p, nil
+ }
+}
+
+// EvalOption indicates an evaluation option that may affect the evaluation behavior or information
+// in the output result.
+type EvalOption int
+
+const (
+ // OptTrackState will cause the runtime to return an immutable EvalState value in the Result.
+ OptTrackState EvalOption = 1 << iota
+
+ // OptExhaustiveEval causes the runtime to disable short-circuits and track state.
+ OptExhaustiveEval EvalOption = 1<<iota | OptTrackState
+ if p.interruptCheckFrequency > 0 {
+ decorators = append(decorators, interpreter.InterruptableEval())
+ }
+ // Enable constant folding first.
+ if p.evalOpts&OptOptimize == OptOptimize {
+ decorators = append(decorators, interpreter.Optimize())
+ p.regexOptimizations = append(p.regexOptimizations, interpreter.MatchesRegexOptimization)
+ }
+ // Enable regex compilation of constants immediately after folding constants.
+ if len(p.regexOptimizations) > 0 {
+ decorators = append(decorators, interpreter.CompileRegexConstants(p.regexOptimizations...))
+ }
+
+ // Enable exhaustive eval, state tracking and cost tracking last since they require a factory.
+ if p.evalOpts&(OptExhaustiveEval|OptTrackState|OptTrackCost) != 0 {
+ factory := func(state interpreter.EvalState, costTracker *interpreter.CostTracker) (Program, error) {
+ costTracker.Estimator = p.callCostEstimator
+ costTracker.Limit = p.costLimit
+ for _, costOpt := range p.costOptions {
+ err := costOpt(costTracker)
+ if err != nil {
+ return nil, err
+ }
+ }
+ // Limit capacity to guarantee a reallocation when calling 'append(decs, ...)' below. This
+ // prevents the underlying memory from being shared between factory function calls causing
+ // undesired mutations.
+ decs := decorators[:len(decorators):len(decorators)]
+ var observers []interpreter.EvalObserver
+
+ if p.evalOpts&(OptExhaustiveEval|OptTrackState) != 0 {
+ // EvalStateObserver is required for OptExhaustiveEval.
+ observers = append(observers, interpreter.EvalStateObserver(state))
+ }
+ if p.evalOpts&OptTrackCost == OptTrackCost {
+ observers = append(observers, interpreter.CostObserver(costTracker))
+ }
+
+ // Enable exhaustive eval over a basic observer since it offers a superset of features.
+ if p.evalOpts&OptExhaustiveEval == OptExhaustiveEval {
+ decs = append(decs, interpreter.ExhaustiveEval(), interpreter.Observe(observers...))
+ } else if len(observers) > 0 {
+ decs = append(decs, interpreter.Observe(observers...))
+ }
+
+ return p.clone().initInterpretable(a, decs)
+ }
+ return newProgGen(factory)
+ }
+ return p.initInterpretable(a, decorators)
+}
+
+func (p *prog) initInterpretable(a *ast.AST, decs []interpreter.InterpretableDecorator) (*prog, error) {
+ // When the AST has been type-checked, it contains metadata that can be used to speed up program execution.
+ interpretable, err := p.interpreter.NewInterpretable(a, decs...)
+ if err != nil {
+ return nil, err
+ }
+ p.interpretable = interpretable
+ return p, nil
+}
+
+// Eval implements the Program interface method.
+func (p *prog) Eval(input any) (v ref.Val, det *EvalDetails, err error) {
+ // Configure error recovery for unexpected panics during evaluation. Note, the use of named
+ // return values makes it possible to modify the error response during the recovery
+ // function.
+ defer func() {
+ if r := recover(); r != nil {
+ switch t := r.(type) {
+ case interpreter.EvalCancelledError:
+ err = t
+ default:
+ err = fmt.Errorf("internal error: %v", r)
+ }
+ }
+ }()
+ // Build a hierarchical activation if there are default vars set.
+ var vars interpreter.Activation
+ switch v := input.(type) {
+ case interpreter.Activation:
+ vars = v
+ case map[string]any:
+ vars = activationPool.Setup(v)
+ defer activationPool.Put(vars)
+ default:
+ return nil, nil, fmt.Errorf("invalid input, wanted Activation or map[string]any, got: (%T)%v", input, input)
+ }
+ if p.defaultVars != nil {
+ vars = interpreter.NewHierarchicalActivation(p.defaultVars, vars)
+ }
+ v = p.interpretable.Eval(vars)
+ // The output of an internal Eval may have a value (`v`) that is a types.Err. This step
+ // translates the CEL value to a Go error response. This interface does not quite match the
+ // RPC signature which allows for multiple errors to be returned, but should be sufficient.
+ if types.IsError(v) {
+ err = v.(*types.Err)
+ }
+ return
+}
+
+// ContextEval implements the Program interface.
+func (p *prog) ContextEval(ctx context.Context, input any) (ref.Val, *EvalDetails, error) {
+ if ctx == nil {
+ return nil, nil, fmt.Errorf("context can not be nil")
+ }
+ // Configure the input, making sure to wrap Activation inputs in the special ctxActivation which
+ // exposes the #interrupted variable and manages rate-limited checks of the ctx.Done() state.
+ var vars interpreter.Activation
+ switch v := input.(type) {
+ case interpreter.Activation:
+ vars = ctxActivationPool.Setup(v, ctx.Done(), p.interruptCheckFrequency)
+ defer ctxActivationPool.Put(vars)
+ case map[string]any:
+ rawVars := activationPool.Setup(v)
+ defer activationPool.Put(rawVars)
+ vars = ctxActivationPool.Setup(rawVars, ctx.Done(), p.interruptCheckFrequency)
+ defer ctxActivationPool.Put(vars)
+ default:
+ return nil, nil, fmt.Errorf("invalid input, wanted Activation or map[string]any, got: (%T)%v", input, input)
+ }
+ return p.Eval(vars)
+}
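A small sketch of cancellable evaluation; the variable, expression, and check frequency are illustrative, and `cel.InterruptCheckFrequency` is the program option that sets the `interruptCheckFrequency` consumed above.

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/google/cel-go/cel"
)

func main() {
	env, _ := cel.NewEnv(cel.Variable("items", cel.ListType(cel.IntType)))
	ast, iss := env.Compile(`items.all(i, i >= 0)`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	// Poll ctx.Done() every 100 comprehension iterations.
	prg, err := env.Program(ast, cel.InterruptCheckFrequency(100))
	if err != nil {
		panic(err)
	}
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	out, _, err := prg.ContextEval(ctx, map[string]any{"items": []int{1, 2, 3}})
	fmt.Println(out, err) // true <nil>; a cancelled ctx yields an eval-cancelled error
}
```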
+
+// progFactory is a helper alias for marking a program creation factory function.
+type progFactory func(interpreter.EvalState, *interpreter.CostTracker) (Program, error)
+
+// progGen holds a reference to a progFactory instance and implements the Program interface.
+type progGen struct {
+ factory progFactory
+}
+
+// newProgGen tests the factory object by calling it once and returns a factory-based Program if
+// the test is successful.
+func newProgGen(factory progFactory) (Program, error) {
+ // Test the factory to make sure that configuration errors are spotted at config time.
+ tracker, err := interpreter.NewCostTracker(nil)
+ if err != nil {
+ return nil, err
+ }
+ _, err = factory(interpreter.NewEvalState(), tracker)
+ if err != nil {
+ return nil, err
+ }
+ return &progGen{factory: factory}, nil
+}
+
+// Eval implements the Program interface method.
+func (gen *progGen) Eval(input any) (ref.Val, *EvalDetails, error) {
+ // The factory based Eval() differs from the standard evaluation model in that it generates a
+ // new EvalState instance for each call to ensure that unique evaluations yield unique stateful
+ // results.
+ state := interpreter.NewEvalState()
+ costTracker, err := interpreter.NewCostTracker(nil)
+ if err != nil {
+ return nil, nil, err
+ }
+ det := &EvalDetails{state: state, costTracker: costTracker}
+
+ // Generate a new instance of the interpretable using the factory configured during the call to
+ // newProgram(). It is incredibly unlikely that the factory call will generate an error given
+ // the factory test performed within the Program() call.
+ p, err := gen.factory(state, costTracker)
+ if err != nil {
+ return nil, det, err
+ }
+
+ // Evaluate the input, returning the result and the 'state' within EvalDetails.
+ v, _, err := p.Eval(input)
+ if err != nil {
+ return v, det, err
+ }
+ return v, det, nil
+}
+
+// ContextEval implements the Program interface method.
+func (gen *progGen) ContextEval(ctx context.Context, input any) (ref.Val, *EvalDetails, error) {
+ if ctx == nil {
+ return nil, nil, fmt.Errorf("context can not be nil")
+ }
+ // The factory based Eval() differs from the standard evaluation model in that it generates a
+ // new EvalState instance for each call to ensure that unique evaluations yield unique stateful
+ // results.
+ state := interpreter.NewEvalState()
+ costTracker, err := interpreter.NewCostTracker(nil)
+ if err != nil {
+ return nil, nil, err
+ }
+ det := &EvalDetails{state: state, costTracker: costTracker}
+
+ // Generate a new instance of the interpretable using the factory configured during the call to
+ // newProgram(). It is incredibly unlikely that the factory call will generate an error given
+ // the factory test performed within the Program() call.
+ p, err := gen.factory(state, costTracker)
+ if err != nil {
+ return nil, det, err
+ }
+
+ // Evaluate the input, returning the result and the 'state' within EvalDetails.
+ v, _, err := p.ContextEval(ctx, input)
+ if err != nil {
+ return v, det, err
+ }
+ return v, det, nil
+}
+
+type ctxEvalActivation struct {
+ parent interpreter.Activation
+ interrupt <-chan struct{}
+ interruptCheckCount uint
+ interruptCheckFrequency uint
+}
+
+// ResolveName implements the Activation interface method, but adds a special #interrupted variable
+// which is capable of testing whether a 'done' signal is provided from a context.Context channel.
+func (a *ctxEvalActivation) ResolveName(name string) (any, bool) {
+ if name == "#interrupted" {
+ a.interruptCheckCount++
+ if a.interruptCheckCount%a.interruptCheckFrequency == 0 {
+ select {
+ case <-a.interrupt:
+ return true, true
+ default:
+ return nil, false
+ }
+ }
+ return nil, false
+ }
+ return a.parent.ResolveName(name)
+}
+
+func (a *ctxEvalActivation) Parent() interpreter.Activation {
+ return a.parent
+}
+
+func newCtxEvalActivationPool() *ctxEvalActivationPool {
+ return &ctxEvalActivationPool{
+ Pool: sync.Pool{
+ New: func() any {
+ return &ctxEvalActivation{}
+ },
+ },
+ }
+}
+
+type ctxEvalActivationPool struct {
+ sync.Pool
+}
+
+// Setup initializes a pooled Activation with the ability to check for context.Context cancellation.
+func (p *ctxEvalActivationPool) Setup(vars interpreter.Activation, done <-chan struct{}, interruptCheckRate uint) *ctxEvalActivation {
+ a := p.Pool.Get().(*ctxEvalActivation)
+ a.parent = vars
+ a.interrupt = done
+ a.interruptCheckCount = 0
+ a.interruptCheckFrequency = interruptCheckRate
+ return a
+}
+
+type evalActivation struct {
+ vars map[string]any
+ lazyVars map[string]any
+}
+
+// ResolveName looks up the value of the input variable name, if found.
+//
+// Lazy bindings may be supplied within the map-based input in either of the following forms:
+// - func() any
+// - func() ref.Val
+//
+// The lazy binding will only be invoked once per evaluation.
+//
+// Values which are not represented as ref.Val types on input may be adapted to a ref.Val using
+// the types.Adapter configured in the environment.
+func (a *evalActivation) ResolveName(name string) (any, bool) {
+ v, found := a.vars[name]
+ if !found {
+ return nil, false
+ }
+ switch obj := v.(type) {
+ case func() ref.Val:
+ if resolved, found := a.lazyVars[name]; found {
+ return resolved, true
+ }
+ lazy := obj()
+ a.lazyVars[name] = lazy
+ return lazy, true
+ case func() any:
+ if resolved, found := a.lazyVars[name]; found {
+ return resolved, true
+ }
+ lazy := obj()
+ a.lazyVars[name] = lazy
+ return lazy, true
+ default:
+ return obj, true
+ }
+}
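A quick sketch of the lazy-binding forms described above; the `now_ms` variable is invented for the example.

```go
package main

import (
	"fmt"
	"time"

	"github.com/google/cel-go/cel"
)

func main() {
	env, _ := cel.NewEnv(cel.Variable("now_ms", cel.IntType))
	ast, _ := env.Compile(`now_ms > 0`)
	prg, _ := env.Program(ast)
	// The closure is invoked at most once per Eval call, and only if the
	// expression actually references `now_ms`.
	out, _, _ := prg.Eval(map[string]any{
		"now_ms": func() any { return time.Now().UnixMilli() },
	})
	fmt.Println(out) // true
}
```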
+
+// Parent implements the interpreter.Activation interface
+func (a *evalActivation) Parent() interpreter.Activation {
+ return nil
+}
+
+func newEvalActivationPool() *evalActivationPool {
+ return &evalActivationPool{
+ Pool: sync.Pool{
+ New: func() any {
+ return &evalActivation{lazyVars: make(map[string]any)}
+ },
+ },
+ }
+}
+
+type evalActivationPool struct {
+ sync.Pool
+}
+
+// Setup initializes a pooled Activation object with the map input.
+func (p *evalActivationPool) Setup(vars map[string]any) *evalActivation {
+ a := p.Pool.Get().(*evalActivation)
+ a.vars = vars
+ return a
+}
+
+func (p *evalActivationPool) Put(value any) {
+ a := value.(*evalActivation)
+ for k := range a.lazyVars {
+ delete(a.lazyVars, k)
+ }
+ p.Pool.Put(a)
+}
+
+var (
+ // activationPool is an internally managed pool of Activation values that wrap map[string]any inputs
+ activationPool = newEvalActivationPool()
+
+ // ctxActivationPool is an internally managed pool of Activation values that expose a special #interrupted variable
+ ctxActivationPool = newCtxEvalActivationPool()
+)
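To tie the pieces of this file together, a sketch of cost tracking: `cel.EvalOptions(cel.OptTrackCost)` routes evaluation through the factory-based `progGen` above, so each Eval gets fresh `EvalState` and `CostTracker` instances; the expression is invented for the example.

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
)

func main() {
	env, _ := cel.NewEnv()
	ast, iss := env.Compile(`1 + 2 + 3`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	prg, _ := env.Program(ast, cel.EvalOptions(cel.OptTrackCost))
	_, det, err := prg.Eval(cel.NoVars())
	if err != nil {
		panic(err)
	}
	// ActualCost is non-nil because cost tracking was enabled.
	fmt.Println(*det.ActualCost())
}
```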
diff --git a/vendor/github.com/google/cel-go/cel/validator.go b/vendor/github.com/google/cel-go/cel/validator.go
new file mode 100644
index 000000000..b50c67452
--- /dev/null
+++ b/vendor/github.com/google/cel-go/cel/validator.go
@@ -0,0 +1,375 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cel
+
+import (
+ "fmt"
+ "reflect"
+ "regexp"
+
+ "github.com/google/cel-go/common/ast"
+ "github.com/google/cel-go/common/overloads"
+)
+
+const (
+ homogeneousValidatorName = "cel.lib.std.validate.types.homogeneous"
+
+ // HomogeneousAggregateLiteralExemptFunctions is the ValidatorConfig key used to configure
+ // the set of function names which are exempt from homogeneous type checks. The expected type
+ // is a string list of function names.
+ //
+ // As an example, the `.format([args])` call expects the input arguments list to be
+ // comprised of a variety of types which correspond to the types expected by the format control
+// clauses; however, all other uses of a mixed element type list would be unexpected.
+ HomogeneousAggregateLiteralExemptFunctions = homogeneousValidatorName + ".exempt"
+)
+
+// ASTValidators configures a set of ASTValidator instances into the target environment.
+//
+// Validators are applied in the order in which they are specified and are treated as singletons.
+// The same ASTValidator with a given name will not be applied more than once.
+func ASTValidators(validators ...ASTValidator) EnvOption {
+ return func(e *Env) (*Env, error) {
+ for _, v := range validators {
+ if !e.HasValidator(v.Name()) {
+ e.validators = append(e.validators, v)
+ }
+ }
+ return e, nil
+ }
+}
+
+// ASTValidator defines a singleton interface for validating a type-checked Ast against an environment.
+//
+// Note: the Issues argument is mutable in the sense that it is intended to collect errors which will be
+// reported to the caller.
+type ASTValidator interface {
+ // Name returns the name of the validator. Names must be unique.
+ Name() string
+
+ // Validate validates a given Ast within an Environment and collects a set of potential issues.
+ //
+ // The ValidatorConfig is generated from the set of ASTValidatorConfigurer instances prior to
+ // the invocation of the Validate call. The expectation is that the validator configuration
+ // is created in sequence and immutable once provided to the Validate call.
+ //
+ // See individual validators for more information on their configuration keys and configuration
+ // properties.
+ Validate(*Env, ValidatorConfig, *ast.AST, *Issues)
+}
+
+// ValidatorConfig provides an accessor method for querying validator configuration state.
+type ValidatorConfig interface {
+ GetOrDefault(name string, value any) any
+}
+
+// MutableValidatorConfig provides mutation methods for querying and updating validator configuration
+// settings.
+type MutableValidatorConfig interface {
+ ValidatorConfig
+ Set(name string, value any) error
+}
+
+// ASTValidatorConfigurer indicates that this object, currently expected to be an ASTValidator,
+// participates in validator configuration settings.
+//
+// This interface may be split from the expectation of being an ASTValidator instance in the future.
+type ASTValidatorConfigurer interface {
+ Configure(MutableValidatorConfig) error
+}
+
+// validatorConfig implements the ValidatorConfig and MutableValidatorConfig interfaces.
+type validatorConfig struct {
+ data map[string]any
+}
+
+// newValidatorConfig initializes the validator config with default values for core CEL validators.
+func newValidatorConfig() *validatorConfig {
+ return &validatorConfig{
+ data: map[string]any{
+ HomogeneousAggregateLiteralExemptFunctions: []string{},
+ },
+ }
+}
+
+// GetOrDefault returns the configured value for the name, if present, else the input default value.
+//
+// Note, the type-agreement between the input default and configured value is not checked on read.
+func (config *validatorConfig) GetOrDefault(name string, value any) any {
+ v, found := config.data[name]
+ if !found {
+ return value
+ }
+ return v
+}
+
+// Set configures a validator option with the given name and value.
+//
+// If the value had previously been set, the new value must have the same reflection type as the old one,
+// or the call will error.
+func (config *validatorConfig) Set(name string, value any) error {
+ v, found := config.data[name]
+ if found && reflect.TypeOf(v) != reflect.TypeOf(value) {
+ return fmt.Errorf("incompatible configuration type for %s, got %T, wanted %T", name, value, v)
+ }
+ config.data[name] = value
+ return nil
+}
+
+// ExtendedValidations collects a set of common AST validations which reduce the likelihood of runtime errors.
+//
+// - Validate duration and timestamp literals
+// - Ensure regex strings are valid
+// - Disable mixed type list and map literals
+func ExtendedValidations() EnvOption {
+ return ASTValidators(
+ ValidateDurationLiterals(),
+ ValidateTimestampLiterals(),
+ ValidateRegexLiterals(),
+ ValidateHomogeneousAggregateLiterals(),
+ )
+}
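A minimal sketch of the regex validator in action; the `input` variable and pattern are invented.

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
)

func main() {
	env, _ := cel.NewEnv(
		cel.Variable("input", cel.StringType),
		cel.ExtendedValidations(),
	)
	// ValidateRegexLiterals rejects the malformed pattern at compile time,
	// instead of failing on every matches() call at runtime.
	_, iss := env.Compile(`input.matches('(unclosed')`)
	fmt.Println(iss.Err()) // reports an invalid matches argument
}
```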
+
+// ValidateDurationLiterals ensures that duration literal arguments are valid immediately after type-check.
+func ValidateDurationLiterals() ASTValidator {
+ return newFormatValidator(overloads.TypeConvertDuration, 0, evalCall)
+}
+
+// ValidateTimestampLiterals ensures that timestamp literal arguments are valid immediately after type-check.
+func ValidateTimestampLiterals() ASTValidator {
+ return newFormatValidator(overloads.TypeConvertTimestamp, 0, evalCall)
+}
+
+// ValidateRegexLiterals ensures that regex patterns are validated after type-check.
+func ValidateRegexLiterals() ASTValidator {
+ return newFormatValidator(overloads.Matches, 0, compileRegex)
+}
+
+// ValidateHomogeneousAggregateLiterals checks that all list and map literals entries have the same types, i.e.
+// no mixed list element types or mixed map key or map value types.
+//
+// Note: the string format call relies on a mixed element type list for ease of use, so this check skips all
+// literals which occur within string format calls.
+func ValidateHomogeneousAggregateLiterals() ASTValidator {
+ return homogeneousAggregateLiteralValidator{}
+}
+
+// ValidateComprehensionNestingLimit ensures that comprehension nesting does not exceed the specified limit.
+//
+// This validator can be useful for preventing arbitrarily nested comprehensions which can take high polynomial
+// time to complete.
+//
+// Note, this limit does not apply to comprehensions with an empty iteration range, as these comprehensions have
+// no actual looping cost. The cel.bind() utilizes the comprehension structure to perform local variable
+// assignments and supplies an empty iteration range, so they won't count against the nesting limit either.
+func ValidateComprehensionNestingLimit(limit int) ASTValidator {
+ return nestingLimitValidator{limit: limit}
+}
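And a sketch of the nesting limit; the doubly nested `all` comprehension is invented to exceed a limit of one.

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
)

func main() {
	env, _ := cel.NewEnv(
		cel.Variable("xs", cel.ListType(cel.ListType(cel.IntType))),
		cel.ASTValidators(cel.ValidateComprehensionNestingLimit(1)),
	)
	// The inner x.all(...) nests inside xs.all(...), giving a depth of two.
	_, iss := env.Compile(`xs.all(x, x.all(y, y > 0))`)
	fmt.Println(iss.Err()) // reports that the comprehension exceeds the nesting limit
}
```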
+
+type argChecker func(env *Env, call, arg ast.Expr) error
+
+func newFormatValidator(funcName string, argNum int, check argChecker) formatValidator {
+ return formatValidator{
+ funcName: funcName,
+ check: check,
+ argNum: argNum,
+ }
+}
+
+type formatValidator struct {
+ funcName string
+ argNum int
+ check argChecker
+}
+
+// Name returns the unique name of this function format validator.
+func (v formatValidator) Name() string {
+ return fmt.Sprintf("cel.lib.std.validate.functions.%s", v.funcName)
+}
+
+// Validate searches the AST for uses of a given function name with a constant argument and performs a check
+// on whether the argument is a valid literal value.
+func (v formatValidator) Validate(e *Env, _ ValidatorConfig, a *ast.AST, iss *Issues) {
+ root := ast.NavigateAST(a)
+ funcCalls := ast.MatchDescendants(root, ast.FunctionMatcher(v.funcName))
+ for _, call := range funcCalls {
+ callArgs := call.AsCall().Args()
+ if len(callArgs) <= v.argNum {
+ continue
+ }
+ litArg := callArgs[v.argNum]
+ if litArg.Kind() != ast.LiteralKind {
+ continue
+ }
+ if err := v.check(e, call, litArg); err != nil {
+ iss.ReportErrorAtID(litArg.ID(), "invalid %s argument", v.funcName)
+ }
+ }
+}
+
+func evalCall(env *Env, call, arg ast.Expr) error {
+ ast := &Ast{impl: ast.NewAST(call, ast.NewSourceInfo(nil))}
+ prg, err := env.Program(ast)
+ if err != nil {
+ return err
+ }
+ _, _, err = prg.Eval(NoVars())
+ return err
+}
+
+func compileRegex(_ *Env, _, arg ast.Expr) error {
+ pattern := arg.AsLiteral().Value().(string)
+ _, err := regexp.Compile(pattern)
+ return err
+}
+
+type homogeneousAggregateLiteralValidator struct{}
+
+// Name returns the unique name of the homogeneous type validator.
+func (homogeneousAggregateLiteralValidator) Name() string {
+ return homogeneousValidatorName
+}
+
+// Validate validates that all lists and map literals have homogeneous types, i.e. don't contain dyn types.
+//
+// This validator makes an exception for list and map literals which occur at any level of nesting within
+// string format calls.
+func (v homogeneousAggregateLiteralValidator) Validate(_ *Env, c ValidatorConfig, a *ast.AST, iss *Issues) {
+ var exemptedFunctions []string
+ exemptedFunctions = c.GetOrDefault(HomogeneousAggregateLiteralExemptFunctions, exemptedFunctions).([]string)
+ root := ast.NavigateAST(a)
+ listExprs := ast.MatchDescendants(root, ast.KindMatcher(ast.ListKind))
+ for _, listExpr := range listExprs {
+ if inExemptFunction(listExpr, exemptedFunctions) {
+ continue
+ }
+ l := listExpr.AsList()
+ elements := l.Elements()
+ optIndices := l.OptionalIndices()
+ var elemType *Type
+ for i, e := range elements {
+ et := a.GetType(e.ID())
+ if isOptionalIndex(i, optIndices) {
+ et = et.Parameters()[0]
+ }
+ if elemType == nil {
+ elemType = et
+ continue
+ }
+ if !elemType.IsEquivalentType(et) {
+ v.typeMismatch(iss, e.ID(), elemType, et)
+ break
+ }
+ }
+ }
+ mapExprs := ast.MatchDescendants(root, ast.KindMatcher(ast.MapKind))
+ for _, mapExpr := range mapExprs {
+ if inExemptFunction(mapExpr, exemptedFunctions) {
+ continue
+ }
+ m := mapExpr.AsMap()
+ entries := m.Entries()
+ var keyType, valType *Type
+ for _, e := range entries {
+ mapEntry := e.AsMapEntry()
+ key, val := mapEntry.Key(), mapEntry.Value()
+ kt, vt := a.GetType(key.ID()), a.GetType(val.ID())
+ if mapEntry.IsOptional() {
+ vt = vt.Parameters()[0]
+ }
+ if keyType == nil && valType == nil {
+ keyType, valType = kt, vt
+ continue
+ }
+ if !keyType.IsEquivalentType(kt) {
+ v.typeMismatch(iss, key.ID(), keyType, kt)
+ }
+ if !valType.IsEquivalentType(vt) {
+ v.typeMismatch(iss, val.ID(), valType, vt)
+ }
+ }
+ }
+}
+
+func inExemptFunction(e ast.NavigableExpr, exemptFunctions []string) bool {
+ parent, found := e.Parent()
+ for found {
+ if parent.Kind() == ast.CallKind {
+ fnName := parent.AsCall().FunctionName()
+ for _, exempt := range exemptFunctions {
+ if exempt == fnName {
+ return true
+ }
+ }
+ }
+ parent, found = parent.Parent()
+ }
+ return false
+}
+
+func isOptionalIndex(i int, optIndices []int32) bool {
+ for _, optInd := range optIndices {
+ if i == int(optInd) {
+ return true
+ }
+ }
+ return false
+}
+
+func (homogeneousAggregateLiteralValidator) typeMismatch(iss *Issues, id int64, expected, actual *Type) {
+ iss.ReportErrorAtID(id, "expected type '%s' but found '%s'", FormatCELType(expected), FormatCELType(actual))
+}
+
+type nestingLimitValidator struct {
+ limit int
+}
+
+func (v nestingLimitValidator) Name() string {
+ return "cel.lib.std.validate.comprehension_nesting_limit"
+}
+
+func (v nestingLimitValidator) Validate(e *Env, _ ValidatorConfig, a *ast.AST, iss *Issues) {
+ root := ast.NavigateAST(a)
+ comprehensions := ast.MatchDescendants(root, ast.KindMatcher(ast.ComprehensionKind))
+ if len(comprehensions) <= v.limit {
+ return
+ }
+ for _, comp := range comprehensions {
+ count := 0
+ e := comp
+ hasParent := true
+ for hasParent {
+ // When the expression is not a comprehension, continue to the next ancestor.
+ if e.Kind() != ast.ComprehensionKind {
+ e, hasParent = e.Parent()
+ continue
+ }
+ // When the comprehension has an empty range, continue to the next ancestor
+ // as this comprehension does not have any associated cost.
+ iterRange := e.AsComprehension().IterRange()
+ if iterRange.Kind() == ast.ListKind && iterRange.AsList().Size() == 0 {
+ e, hasParent = e.Parent()
+ continue
+ }
+ // Otherwise check the nesting limit.
+ count++
+ if count > v.limit {
+ iss.ReportErrorAtID(comp.ID(), "comprehension exceeds nesting limit")
+ break
+ }
+ e, hasParent = e.Parent()
+ }
+ }
+}
diff --git a/vendor/github.com/google/cel-go/checker/BUILD.bazel b/vendor/github.com/google/cel-go/checker/BUILD.bazel
new file mode 100644
index 000000000..678b412a9
--- /dev/null
+++ b/vendor/github.com/google/cel-go/checker/BUILD.bazel
@@ -0,0 +1,64 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(
+ licenses = ["notice"], # Apache 2.0
+)
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "checker.go",
+ "cost.go",
+ "env.go",
+ "errors.go",
+ "format.go",
+ "mapping.go",
+ "options.go",
+ "printer.go",
+ "scopes.go",
+ "types.go",
+ ],
+ importpath = "github.com/google/cel-go/checker",
+ visibility = ["//visibility:public"],
+ deps = [
+ "//checker/decls:go_default_library",
+ "//common:go_default_library",
+ "//common/ast:go_default_library",
+ "//common/containers:go_default_library",
+ "//common/debug:go_default_library",
+ "//common/decls:go_default_library",
+ "//common/operators:go_default_library",
+ "//common/overloads:go_default_library",
+ "//common/stdlib:go_default_library",
+ "//common/types:go_default_library",
+ "//common/types/pb:go_default_library",
+ "//common/types/ref:go_default_library",
+ "//parser:go_default_library",
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
+ "@org_golang_google_protobuf//proto:go_default_library",
+ "@org_golang_google_protobuf//types/known/emptypb:go_default_library",
+ "@org_golang_google_protobuf//types/known/structpb:go_default_library",
+ ],
+)
+
+go_test(
+ name = "go_default_test",
+ size = "small",
+ srcs = [
+ "checker_test.go",
+ "cost_test.go",
+ "env_test.go",
+ "format_test.go",
+ ],
+ embed = [
+ ":go_default_library",
+ ],
+ deps = [
+ "//common/types:go_default_library",
+ "//parser:go_default_library",
+ "//test:go_default_library",
+ "//test/proto2pb:go_default_library",
+ "//test/proto3pb:go_default_library",
+ "@org_golang_google_protobuf//proto:go_default_library",
+ ],
+)
diff --git a/vendor/github.com/google/cel-go/checker/checker.go b/vendor/github.com/google/cel-go/checker/checker.go
new file mode 100644
index 000000000..0603cfa30
--- /dev/null
+++ b/vendor/github.com/google/cel-go/checker/checker.go
@@ -0,0 +1,711 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package checker defines functions to type-check a parsed expression
+// against a set of identifier and function declarations.
+package checker
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/google/cel-go/common"
+ "github.com/google/cel-go/common/ast"
+ "github.com/google/cel-go/common/containers"
+ "github.com/google/cel-go/common/decls"
+ "github.com/google/cel-go/common/operators"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+)
+
+type checker struct {
+ *ast.AST
+ ast.ExprFactory
+ env *Env
+ errors *typeErrors
+ mappings *mapping
+ freeTypeVarCounter int
+}
+
+// Check performs type checking, giving a typed AST.
+//
+// The input is a parsed AST and an env which encapsulates type binding of variables,
+// declarations of built-in functions, descriptions of protocol buffers, and a registry for
+// errors.
+//
+// Returns a type-checked AST, which might not be usable if there are errors in the error
+// registry.
+func Check(parsed *ast.AST, source common.Source, env *Env) (*ast.AST, *common.Errors) {
+ errs := common.NewErrors(source)
+ typeMap := make(map[int64]*types.Type)
+ refMap := make(map[int64]*ast.ReferenceInfo)
+ c := checker{
+ AST: ast.NewCheckedAST(parsed, typeMap, refMap),
+ ExprFactory: ast.NewExprFactory(),
+ env: env,
+ errors: &typeErrors{errs: errs},
+ mappings: newMapping(),
+ freeTypeVarCounter: 0,
+ }
+ c.check(c.Expr())
+
+ // Walk over the final type map substituting any type parameters either by their bound value
+ // or by DYN.
+ for id, t := range c.TypeMap() {
+ c.SetType(id, substitute(c.mappings, t, true))
+ }
+ return c.AST, errs
+}
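For reviewers unfamiliar with the flow: this package is normally reached through the `cel` package, which runs Check between Parse and Program. A sketch, with an invented variable and expression:

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
)

func main() {
	env, _ := cel.NewEnv(cel.Variable("x", cel.IntType))
	// Parse produces an untyped AST.
	parsed, iss := env.Parse(`x + 1`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	// Check annotates every node with a type and resolves references,
	// ultimately by calling checker.Check above.
	checked, iss := env.Check(parsed)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	fmt.Println(checked.OutputType()) // int
}
```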
+
+func (c *checker) check(e ast.Expr) {
+ if e == nil {
+ return
+ }
+ switch e.Kind() {
+ case ast.LiteralKind:
+ literal := ref.Val(e.AsLiteral())
+ switch literal.Type() {
+ case types.BoolType, types.BytesType, types.DoubleType, types.IntType,
+ types.NullType, types.StringType, types.UintType:
+ c.setType(e, literal.Type().(*types.Type))
+ default:
+ c.errors.unexpectedASTType(e.ID(), c.location(e), "literal", literal.Type().TypeName())
+ }
+ case ast.IdentKind:
+ c.checkIdent(e)
+ case ast.SelectKind:
+ c.checkSelect(e)
+ case ast.CallKind:
+ c.checkCall(e)
+ case ast.ListKind:
+ c.checkCreateList(e)
+ case ast.MapKind:
+ c.checkCreateMap(e)
+ case ast.StructKind:
+ c.checkCreateStruct(e)
+ case ast.ComprehensionKind:
+ c.checkComprehension(e)
+ default:
+ c.errors.unexpectedASTType(e.ID(), c.location(e), "unspecified", reflect.TypeOf(e).Name())
+ }
+}
+
+func (c *checker) checkIdent(e ast.Expr) {
+ identName := e.AsIdent()
+ // Check to see if the identifier is declared.
+ if ident := c.env.LookupIdent(identName); ident != nil {
+ c.setType(e, ident.Type())
+ c.setReference(e, ast.NewIdentReference(ident.Name(), ident.Value()))
+ // Overwrite the identifier with its fully qualified name.
+ e.SetKindCase(c.NewIdent(e.ID(), ident.Name()))
+ return
+ }
+
+ c.setType(e, types.ErrorType)
+ c.errors.undeclaredReference(e.ID(), c.location(e), c.env.container.Name(), identName)
+}
+
+func (c *checker) checkSelect(e ast.Expr) {
+ sel := e.AsSelect()
+ // Before traversing down the tree, try to interpret as qualified name.
+ qname, found := containers.ToQualifiedName(e)
+ if found {
+ ident := c.env.LookupIdent(qname)
+ if ident != nil {
+ // We don't check for a TestOnly expression here since the `found` result is
+ // always going to be false for TestOnly expressions.
+
+ // Rewrite the node to be a variable reference to the resolved fully-qualified
+ // variable name.
+ c.setType(e, ident.Type())
+ c.setReference(e, ast.NewIdentReference(ident.Name(), ident.Value()))
+ e.SetKindCase(c.NewIdent(e.ID(), ident.Name()))
+ return
+ }
+ }
+
+ resultType := c.checkSelectField(e, sel.Operand(), sel.FieldName(), false)
+ if sel.IsTestOnly() {
+ resultType = types.BoolType
+ }
+ c.setType(e, substitute(c.mappings, resultType, false))
+}
+
+func (c *checker) checkOptSelect(e ast.Expr) {
+ // Collect metadata related to the opt select call packaged by the parser.
+ call := e.AsCall()
+ operand := call.Args()[0]
+ field := call.Args()[1]
+ fieldName, isString := maybeUnwrapString(field)
+ if !isString {
+ c.errors.notAnOptionalFieldSelection(field.ID(), c.location(field), field)
+ return
+ }
+
+ // Perform type-checking using the field selection logic.
+ resultType := c.checkSelectField(e, operand, fieldName, true)
+ c.setType(e, substitute(c.mappings, resultType, false))
+ c.setReference(e, ast.NewFunctionReference("select_optional_field"))
+}
+
+func (c *checker) checkSelectField(e, operand ast.Expr, field string, optional bool) *types.Type {
+ // Interpret as field selection, first traversing down the operand.
+ c.check(operand)
+ operandType := substitute(c.mappings, c.getType(operand), false)
+
+ // If the target type is 'optional', unwrap it for the sake of this check.
+ targetType, isOpt := maybeUnwrapOptional(operandType)
+
+ // Assume error type by default as most types do not support field selection.
+ resultType := types.ErrorType
+ switch targetType.Kind() {
+ case types.MapKind:
+ // Maps yield their value type as the selection result type.
+ resultType = targetType.Parameters()[1]
+ case types.StructKind:
+ // Objects yield their field type declaration as the selection result type, but only if
+ // the field is defined.
+ messageType := targetType
+ if fieldType, found := c.lookupFieldType(e.ID(), messageType.TypeName(), field); found {
+ resultType = fieldType
+ }
+ case types.TypeParamKind:
+ // Set the operand type to DYN to prevent assignment to a potentially incorrect type
+ // at a later point in type-checking. The isAssignable call will update the type
+ // substitutions for the type param under the covers.
+ c.isAssignable(types.DynType, targetType)
+ // Also, set the result type to DYN.
+ resultType = types.DynType
+ default:
+ // Dynamic / error values are treated as DYN type. Errors are handled this way as well
+ // in order to allow forward progress on the check.
+ if !isDynOrError(targetType) {
+ c.errors.typeDoesNotSupportFieldSelection(e.ID(), c.location(e), targetType)
+ }
+ resultType = types.DynType
+ }
+
+ // If the target type was optional coming in, then the result must be optional going out.
+ if isOpt || optional {
+ return types.NewOptionalType(resultType)
+ }
+ return resultType
+}
+
+func (c *checker) checkCall(e ast.Expr) {
+ // Note: similar logic exists within the `interpreter/planner.go`. If making changes here
+ // please consider the impact on planner.go and consolidate implementations or mirror code
+ // as appropriate.
+ call := e.AsCall()
+ fnName := call.FunctionName()
+ if fnName == operators.OptSelect {
+ c.checkOptSelect(e)
+ return
+ }
+
+ args := call.Args()
+ // Traverse arguments.
+ for _, arg := range args {
+ c.check(arg)
+ }
+
+ // Regular static call with simple name.
+ if !call.IsMemberFunction() {
+ // Check for the existence of the function.
+ fn := c.env.LookupFunction(fnName)
+ if fn == nil {
+ c.errors.undeclaredReference(e.ID(), c.location(e), c.env.container.Name(), fnName)
+ c.setType(e, types.ErrorType)
+ return
+ }
+ // Overwrite the function name with its fully qualified resolved name.
+ e.SetKindCase(c.NewCall(e.ID(), fn.Name(), args...))
+ // Check to see whether the overload resolves.
+ c.resolveOverloadOrError(e, fn, nil, args)
+ return
+ }
+
+ // If a receiver 'target' is present, it may either be a receiver function, or a namespaced
+ // function, but not both. Given a.b.c() either a.b.c is a function or c is a function with
+ // target a.b.
+ //
+ // Check whether the target is a namespaced function name.
+ target := call.Target()
+ qualifiedPrefix, maybeQualified := containers.ToQualifiedName(target)
+ if maybeQualified {
+ maybeQualifiedName := qualifiedPrefix + "." + fnName
+ fn := c.env.LookupFunction(maybeQualifiedName)
+ if fn != nil {
+ // The function name is namespaced and so preserving the target operand would
+ // be an inaccurate representation of the desired evaluation behavior.
+ // Overwrite with fully-qualified resolved function name sans receiver target.
+ e.SetKindCase(c.NewCall(e.ID(), fn.Name(), args...))
+ c.resolveOverloadOrError(e, fn, nil, args)
+ return
+ }
+ }
+
+ // Regular instance call.
+ c.check(target)
+ fn := c.env.LookupFunction(fnName)
+ // Function found, attempt overload resolution.
+ if fn != nil {
+ c.resolveOverloadOrError(e, fn, target, args)
+ return
+ }
+ // Function name not declared, record error.
+ c.setType(e, types.ErrorType)
+ c.errors.undeclaredReference(e.ID(), c.location(e), c.env.container.Name(), fnName)
+}
+
+func (c *checker) resolveOverloadOrError(
+ e ast.Expr, fn *decls.FunctionDecl, target ast.Expr, args []ast.Expr) {
+ // Attempt to resolve the overload.
+ resolution := c.resolveOverload(e, fn, target, args)
+ // No such overload, error noted in the resolveOverload call, type recorded here.
+ if resolution == nil {
+ c.setType(e, types.ErrorType)
+ return
+ }
+ // Overload found.
+ c.setType(e, resolution.Type)
+ c.setReference(e, resolution.Reference)
+}
+
+func (c *checker) resolveOverload(
+ call ast.Expr, fn *decls.FunctionDecl, target ast.Expr, args []ast.Expr) *overloadResolution {
+
+ var argTypes []*types.Type
+ if target != nil {
+ argTypes = append(argTypes, c.getType(target))
+ }
+ for _, arg := range args {
+ argTypes = append(argTypes, c.getType(arg))
+ }
+
+ var resultType *types.Type
+ var checkedRef *ast.ReferenceInfo
+ for _, overload := range fn.OverloadDecls() {
+ // Determine whether the overload is currently considered.
+ if c.env.isOverloadDisabled(overload.ID()) {
+ continue
+ }
+
+ // Ensure the call style for the overload matches.
+ if (target == nil && overload.IsMemberFunction()) ||
+ (target != nil && !overload.IsMemberFunction()) {
+ // not a compatible call style.
+ continue
+ }
+
+ // Alternative type-checking behavior when the logical operators are compacted into
+ // variadic AST representations.
+ if fn.Name() == operators.LogicalAnd || fn.Name() == operators.LogicalOr {
+ checkedRef = ast.NewFunctionReference(overload.ID())
+ for i, argType := range argTypes {
+ if !c.isAssignable(argType, types.BoolType) {
+ c.errors.typeMismatch(
+ args[i].ID(),
+ c.locationByID(args[i].ID()),
+ types.BoolType,
+ argType)
+ resultType = types.ErrorType
+ }
+ }
+ if isError(resultType) {
+ return nil
+ }
+ return newResolution(checkedRef, types.BoolType)
+ }
+
+ overloadType := newFunctionType(overload.ResultType(), overload.ArgTypes()...)
+ typeParams := overload.TypeParams()
+ if len(typeParams) != 0 {
+ // Instantiate overload's type with fresh type variables.
+ substitutions := newMapping()
+ for _, typePar := range typeParams {
+ substitutions.add(types.NewTypeParamType(typePar), c.newTypeVar())
+ }
+ overloadType = substitute(substitutions, overloadType, false)
+ }
+
+ candidateArgTypes := overloadType.Parameters()[1:]
+ if c.isAssignableList(argTypes, candidateArgTypes) {
+ if checkedRef == nil {
+ checkedRef = ast.NewFunctionReference(overload.ID())
+ } else {
+ checkedRef.AddOverload(overload.ID())
+ }
+
+ // First matching overload, determines result type.
+ fnResultType := substitute(c.mappings, overloadType.Parameters()[0], false)
+ if resultType == nil {
+ resultType = fnResultType
+ } else if !isDyn(resultType) && !fnResultType.IsExactType(resultType) {
+ resultType = types.DynType
+ }
+ }
+ }
+
+ if resultType == nil {
+ for i, argType := range argTypes {
+ argTypes[i] = substitute(c.mappings, argType, true)
+ }
+ c.errors.noMatchingOverload(call.ID(), c.location(call), fn.Name(), argTypes, target != nil)
+ return nil
+ }
+
+ return newResolution(checkedRef, resultType)
+}
+
+func (c *checker) checkCreateList(e ast.Expr) {
+ create := e.AsList()
+ var elemsType *types.Type
+ optionalIndices := create.OptionalIndices()
+ optionals := make(map[int32]bool, len(optionalIndices))
+ for _, optInd := range optionalIndices {
+ optionals[optInd] = true
+ }
+ for i, e := range create.Elements() {
+ c.check(e)
+ elemType := c.getType(e)
+ if optionals[int32(i)] {
+ var isOptional bool
+ elemType, isOptional = maybeUnwrapOptional(elemType)
+ if !isOptional && !isDyn(elemType) {
+ c.errors.typeMismatch(e.ID(), c.location(e), types.NewOptionalType(elemType), elemType)
+ }
+ }
+ elemsType = c.joinTypes(e, elemsType, elemType)
+ }
+ if elemsType == nil {
+ // If the list is empty, assign free type var to elem type.
+ elemsType = c.newTypeVar()
+ }
+ c.setType(e, types.NewListType(elemsType))
+}
+
+func (c *checker) checkCreateMap(e ast.Expr) {
+ mapVal := e.AsMap()
+ var mapKeyType *types.Type
+ var mapValueType *types.Type
+ for _, e := range mapVal.Entries() {
+ entry := e.AsMapEntry()
+ key := entry.Key()
+ c.check(key)
+ mapKeyType = c.joinTypes(key, mapKeyType, c.getType(key))
+
+ val := entry.Value()
+ c.check(val)
+ valType := c.getType(val)
+ if entry.IsOptional() {
+ var isOptional bool
+ valType, isOptional = maybeUnwrapOptional(valType)
+ if !isOptional && !isDyn(valType) {
+ c.errors.typeMismatch(val.ID(), c.location(val), types.NewOptionalType(valType), valType)
+ }
+ }
+ mapValueType = c.joinTypes(val, mapValueType, valType)
+ }
+ if mapKeyType == nil {
+ // If the map is empty, assign free type variables to typeKey and value type.
+ mapKeyType = c.newTypeVar()
+ mapValueType = c.newTypeVar()
+ }
+ c.setType(e, types.NewMapType(mapKeyType, mapValueType))
+}
+
+func (c *checker) checkCreateStruct(e ast.Expr) {
+ msgVal := e.AsStruct()
+ // Determine the type of the message.
+ resultType := types.ErrorType
+ ident := c.env.LookupIdent(msgVal.TypeName())
+ if ident == nil {
+ c.errors.undeclaredReference(
+ e.ID(), c.location(e), c.env.container.Name(), msgVal.TypeName())
+ c.setType(e, types.ErrorType)
+ return
+ }
+ // Ensure the type name is fully qualified in the AST.
+ typeName := ident.Name()
+ if msgVal.TypeName() != typeName {
+ e.SetKindCase(c.NewStruct(e.ID(), typeName, msgVal.Fields()))
+ msgVal = e.AsStruct()
+ }
+ c.setReference(e, ast.NewIdentReference(typeName, nil))
+ identKind := ident.Type().Kind()
+ if identKind != types.ErrorKind {
+ if identKind != types.TypeKind {
+ c.errors.notAType(e.ID(), c.location(e), ident.Type().DeclaredTypeName())
+ } else {
+ resultType = ident.Type().Parameters()[0]
+ // Backwards compatibility test between well-known types and message types
+ // In this context, the type is being instantiated by its protobuf name which
+ // is not ideal or recommended, but some users expect this to work.
+ if isWellKnownType(resultType) {
+ typeName = getWellKnownTypeName(resultType)
+ } else if resultType.Kind() == types.StructKind {
+ typeName = resultType.DeclaredTypeName()
+ } else {
+ c.errors.notAMessageType(e.ID(), c.location(e), resultType.DeclaredTypeName())
+ resultType = types.ErrorType
+ }
+ }
+ }
+ c.setType(e, resultType)
+
+ // Check the field initializers.
+ for _, f := range msgVal.Fields() {
+ field := f.AsStructField()
+ fieldName := field.Name()
+ value := field.Value()
+ c.check(value)
+
+ fieldType := types.ErrorType
+ ft, found := c.lookupFieldType(f.ID(), typeName, fieldName)
+ if found {
+ fieldType = ft
+ }
+
+ valType := c.getType(value)
+ if field.IsOptional() {
+ var isOptional bool
+ valType, isOptional = maybeUnwrapOptional(valType)
+ if !isOptional && !isDyn(valType) {
+ c.errors.typeMismatch(value.ID(), c.location(value), types.NewOptionalType(valType), valType)
+ }
+ }
+ if !c.isAssignable(fieldType, valType) {
+ c.errors.fieldTypeMismatch(f.ID(), c.locationByID(f.ID()), fieldName, fieldType, valType)
+ }
+ }
+}
+
+func (c *checker) checkComprehension(e ast.Expr) {
+ comp := e.AsComprehension()
+ c.check(comp.IterRange())
+ c.check(comp.AccuInit())
+ rangeType := substitute(c.mappings, c.getType(comp.IterRange()), false)
+
+ // Create a scope for the comprehension since it has a local accumulation variable.
+ // This scope will contain the accumulation variable used to compute the result.
+ accuType := c.getType(comp.AccuInit())
+ c.env = c.env.enterScope()
+ c.env.AddIdents(decls.NewVariable(comp.AccuVar(), accuType))
+
+ var varType, var2Type *types.Type
+ switch rangeType.Kind() {
+ case types.ListKind:
+ // varType represents the list element type for one-variable comprehensions.
+ varType = rangeType.Parameters()[0]
+ if comp.HasIterVar2() {
+ // varType represents the list index (int) for two-variable comprehensions,
+ // and var2Type represents the list element type.
+ var2Type = varType
+ varType = types.IntType
+ }
+ case types.MapKind:
+ // varType represents the map entry key for all comprehension types.
+ varType = rangeType.Parameters()[0]
+ if comp.HasIterVar2() {
+ // var2Type represents the map entry value for two-variable comprehensions.
+ var2Type = rangeType.Parameters()[1]
+ }
+ case types.DynKind, types.ErrorKind, types.TypeParamKind:
+ // Set the range type to DYN to prevent assignment to a potentially incorrect type
+ // at a later point in type-checking. The isAssignable call will update the type
+ // substitutions for the type param under the covers.
+ c.isAssignable(types.DynType, rangeType)
+ // Set the range iteration variable to type DYN as well.
+ varType = types.DynType
+ default:
+ c.errors.notAComprehensionRange(comp.IterRange().ID(), c.location(comp.IterRange()), rangeType)
+ varType = types.ErrorType
+ }
+
+ // Create a block scope for the loop.
+ c.env = c.env.enterScope()
+ c.env.AddIdents(decls.NewVariable(comp.IterVar(), varType))
+ if comp.HasIterVar2() {
+ c.env.AddIdents(decls.NewVariable(comp.IterVar2(), var2Type))
+ }
+ // Check the variable references in the condition and step.
+ c.check(comp.LoopCondition())
+ c.assertType(comp.LoopCondition(), types.BoolType)
+ c.check(comp.LoopStep())
+ c.assertType(comp.LoopStep(), accuType)
+ // Exit the loop's block scope before checking the result.
+ c.env = c.env.exitScope()
+ c.check(comp.Result())
+ // Exit the comprehension scope.
+ c.env = c.env.exitScope()
+ c.setType(e, substitute(c.mappings, c.getType(comp.Result()), false))
+}
+
+// Checks compatibility of joined types, and returns the most general common type.
+func (c *checker) joinTypes(e ast.Expr, previous, current *types.Type) *types.Type {
+ if previous == nil {
+ return current
+ }
+ if c.isAssignable(previous, current) {
+ return mostGeneral(previous, current)
+ }
+ if c.dynAggregateLiteralElementTypesEnabled() {
+ return types.DynType
+ }
+ c.errors.typeMismatch(e.ID(), c.location(e), previous, current)
+ return types.ErrorType
+}
+
+func (c *checker) dynAggregateLiteralElementTypesEnabled() bool {
+ return c.env.aggLitElemType == dynElementType
+}
+
+func (c *checker) newTypeVar() *types.Type {
+ id := c.freeTypeVarCounter
+ c.freeTypeVarCounter++
+ return types.NewTypeParamType(fmt.Sprintf("_var%d", id))
+}
+
+func (c *checker) isAssignable(t1, t2 *types.Type) bool {
+ subs := isAssignable(c.mappings, t1, t2)
+ if subs != nil {
+ c.mappings = subs
+ return true
+ }
+
+ return false
+}
+
+func (c *checker) isAssignableList(l1, l2 []*types.Type) bool {
+ subs := isAssignableList(c.mappings, l1, l2)
+ if subs != nil {
+ c.mappings = subs
+ return true
+ }
+
+ return false
+}
+
+func maybeUnwrapString(e ast.Expr) (string, bool) {
+ switch e.Kind() {
+ case ast.LiteralKind:
+ literal := e.AsLiteral()
+ switch v := literal.(type) {
+ case types.String:
+ return string(v), true
+ }
+ }
+ return "", false
+}
+
+func (c *checker) setType(e ast.Expr, t *types.Type) {
+ if old, found := c.TypeMap()[e.ID()]; found && !old.IsExactType(t) {
+ c.errors.incompatibleType(e.ID(), c.location(e), e, old, t)
+ return
+ }
+ c.SetType(e.ID(), t)
+}
+
+func (c *checker) getType(e ast.Expr) *types.Type {
+ return c.TypeMap()[e.ID()]
+}
+
+func (c *checker) setReference(e ast.Expr, r *ast.ReferenceInfo) {
+ if old, found := c.ReferenceMap()[e.ID()]; found && !old.Equals(r) {
+ c.errors.referenceRedefinition(e.ID(), c.location(e), e, old, r)
+ return
+ }
+ c.SetReference(e.ID(), r)
+}
+
+func (c *checker) assertType(e ast.Expr, t *types.Type) {
+ if !c.isAssignable(t, c.getType(e)) {
+ c.errors.typeMismatch(e.ID(), c.location(e), t, c.getType(e))
+ }
+}
+
+type overloadResolution struct {
+ Type *types.Type
+ Reference *ast.ReferenceInfo
+}
+
+func newResolution(r *ast.ReferenceInfo, t *types.Type) *overloadResolution {
+ return &overloadResolution{
+ Reference: r,
+ Type: t,
+ }
+}
+
+func (c *checker) location(e ast.Expr) common.Location {
+ return c.locationByID(e.ID())
+}
+
+func (c *checker) locationByID(id int64) common.Location {
+ return c.SourceInfo().GetStartLocation(id)
+}
+
+func (c *checker) lookupFieldType(exprID int64, structType, fieldName string) (*types.Type, bool) {
+ if _, found := c.env.provider.FindStructType(structType); !found {
+ // This should not happen; report an error anyway.
+ c.errors.unexpectedFailedResolution(exprID, c.locationByID(exprID), structType)
+ return nil, false
+ }
+
+ if ft, found := c.env.provider.FindStructFieldType(structType, fieldName); found {
+ return ft.Type, found
+ }
+
+ c.errors.undefinedField(exprID, c.locationByID(exprID), fieldName)
+ return nil, false
+}
+
+func isWellKnownType(t *types.Type) bool {
+ switch t.Kind() {
+ case types.AnyKind, types.TimestampKind, types.DurationKind, types.DynKind, types.NullTypeKind:
+ return true
+ case types.BoolKind, types.BytesKind, types.DoubleKind, types.IntKind, types.StringKind, types.UintKind:
+ return t.IsAssignableType(types.NullType)
+ case types.ListKind:
+ return t.Parameters()[0] == types.DynType
+ case types.MapKind:
+ return t.Parameters()[0] == types.StringType && t.Parameters()[1] == types.DynType
+ }
+ return false
+}
+
+func getWellKnownTypeName(t *types.Type) string {
+ if name, found := wellKnownTypes[t.Kind()]; found {
+ return name
+ }
+ return ""
+}
+
+var (
+ wellKnownTypes = map[types.Kind]string{
+ types.AnyKind: "google.protobuf.Any",
+ types.BoolKind: "google.protobuf.BoolValue",
+ types.BytesKind: "google.protobuf.BytesValue",
+ types.DoubleKind: "google.protobuf.DoubleValue",
+ types.DurationKind: "google.protobuf.Duration",
+ types.DynKind: "google.protobuf.Value",
+ types.IntKind: "google.protobuf.Int64Value",
+ types.ListKind: "google.protobuf.ListValue",
+ types.NullTypeKind: "google.protobuf.NullValue",
+ types.MapKind: "google.protobuf.Struct",
+ types.StringKind: "google.protobuf.StringValue",
+ types.TimestampKind: "google.protobuf.Timestamp",
+ types.UintKind: "google.protobuf.UInt64Value",
+ }
+)
diff --git a/vendor/github.com/google/cel-go/checker/cost.go b/vendor/github.com/google/cel-go/checker/cost.go
new file mode 100644
index 000000000..04244694d
--- /dev/null
+++ b/vendor/github.com/google/cel-go/checker/cost.go
@@ -0,0 +1,705 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package checker
+
+import (
+ "math"
+
+ "github.com/google/cel-go/common"
+ "github.com/google/cel-go/common/ast"
+ "github.com/google/cel-go/common/overloads"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/parser"
+)
+
+// WARNING: Any changes to cost calculations in this file require a corresponding change in interpreter/runtimecost.go
+
+// CostEstimator estimates the sizes of variable length input data and the costs of functions.
+type CostEstimator interface {
+ // EstimateSize returns a SizeEstimate for the given AstNode, or nil if
+ // the estimator has no estimate to provide. The size is equivalent to the result of the CEL `size()` function:
+ // length of strings and bytes, number of map entries or number of list items.
+ // EstimateSize is only called for AstNodes where
+ // CEL does not know the size; EstimateSize is not called for values defined inline in CEL where the size
+ // is already obvious to CEL.
+ EstimateSize(element AstNode) *SizeEstimate
+ // EstimateCallCost returns the estimated cost of an invocation, or nil if
+ // the estimator has no estimate to provide.
+ EstimateCallCost(function, overloadID string, target *AstNode, args []AstNode) *CallEstimate
+}
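A sketch of wiring a custom estimator into static cost estimation; `noHintsEstimator` is an invented stub that defers to the coster's built-in heuristics, and `env.EstimateCost` is the entry point in the `cel` package.

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/checker"
)

// noHintsEstimator provides no size or call-cost hints, so the coster
// falls back to its built-in heuristics.
type noHintsEstimator struct{}

func (noHintsEstimator) EstimateSize(element checker.AstNode) *checker.SizeEstimate {
	return nil
}

func (noHintsEstimator) EstimateCallCost(function, overloadID string, target *checker.AstNode, args []checker.AstNode) *checker.CallEstimate {
	return nil
}

func main() {
	env, _ := cel.NewEnv(cel.Variable("name", cel.StringType))
	ast, iss := env.Compile(`name.startsWith("a")`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	est, err := env.EstimateCost(ast, noHintsEstimator{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("static cost range: [%d, %d]\n", est.Min, est.Max)
}
```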
+
+// CallEstimate includes a CostEstimate for the call, and an optional estimate of the result object size.
+// The ResultSize should only be provided if the call results in a map, list, string or bytes.
+type CallEstimate struct {
+ CostEstimate
+ ResultSize *SizeEstimate
+}
+
+// AstNode represents an AST node for the purpose of cost estimations.
+type AstNode interface {
+ // Path returns a field path through the provided type declarations to the type of the AstNode, or nil if the AstNode does not
+ // represent a type directly reachable from the provided type declarations.
+ // The first path element is a variable. All subsequent path elements are one of: field name, '@items', '@keys', '@values'.
+ Path() []string
+ // Type returns the deduced type of the AstNode.
+ Type() *types.Type
+ // Expr returns the expression of the AstNode.
+ Expr() ast.Expr
+ // ComputedSize returns a size estimate of the AstNode derived from information available in the CEL expression.
+ // For constants and inline list and map declarations, the exact size is returned. For concatenated list, strings
+ // and bytes, the size is derived from the size estimates of the operands. nil is returned if there is no
+ // computed size available.
+ ComputedSize() *SizeEstimate
+}
+
+type astNode struct {
+ path []string
+ t *types.Type
+ expr ast.Expr
+ derivedSize *SizeEstimate
+}
+
+func (e astNode) Path() []string {
+ return e.path
+}
+
+func (e astNode) Type() *types.Type {
+ return e.t
+}
+
+func (e astNode) Expr() ast.Expr {
+ return e.expr
+}
+
+func (e astNode) ComputedSize() *SizeEstimate {
+ if e.derivedSize != nil {
+ return e.derivedSize
+ }
+ var v uint64
+ switch e.expr.Kind() {
+ case ast.LiteralKind:
+ switch ck := e.expr.AsLiteral().(type) {
+ case types.String:
+ // converting to runes here is an O(n) operation, but
+ // this is consistent with how size is computed at runtime,
+ // and how the language definition defines string size
+ v = uint64(len([]rune(ck)))
+ case types.Bytes:
+ v = uint64(len(ck))
+ case types.Bool, types.Double, types.Duration,
+ types.Int, types.Timestamp, types.Uint,
+ types.Null:
+ v = uint64(1)
+ default:
+ return nil
+ }
+ case ast.ListKind:
+ v = uint64(e.expr.AsList().Size())
+ case ast.MapKind:
+ v = uint64(e.expr.AsMap().Size())
+ default:
+ return nil
+ }
+
+ return &SizeEstimate{Min: v, Max: v}
+}
+
+// SizeEstimate represents an estimated size of a variable length string, bytes, map or list.
+type SizeEstimate struct {
+ Min, Max uint64
+}
+
+// Add adds to another SizeEstimate and returns the sum.
+// If add would result in an uint64 overflow, the result is math.MaxUint64.
+func (se SizeEstimate) Add(sizeEstimate SizeEstimate) SizeEstimate {
+ return SizeEstimate{
+ addUint64NoOverflow(se.Min, sizeEstimate.Min),
+ addUint64NoOverflow(se.Max, sizeEstimate.Max),
+ }
+}
+
+// Multiply multiplies by another SizeEstimate and returns the product.
+// If multiply would result in an uint64 overflow, the result is math.MaxUint64.
+func (se SizeEstimate) Multiply(sizeEstimate SizeEstimate) SizeEstimate {
+ return SizeEstimate{
+ multiplyUint64NoOverflow(se.Min, sizeEstimate.Min),
+ multiplyUint64NoOverflow(se.Max, sizeEstimate.Max),
+ }
+}
+
+// MultiplyByCostFactor multiplies a SizeEstimate by a cost factor and returns the CostEstimate with the
+// nearest integer of the result, rounded up.
+func (se SizeEstimate) MultiplyByCostFactor(costPerUnit float64) CostEstimate {
+ return CostEstimate{
+ multiplyByCostFactor(se.Min, costPerUnit),
+ multiplyByCostFactor(se.Max, costPerUnit),
+ }
+}
+
+// MultiplyByCost multiplies by the cost and returns the product.
+// If multiply would result in an uint64 overflow, the result is math.MaxUint64.
+func (se SizeEstimate) MultiplyByCost(cost CostEstimate) CostEstimate {
+ return CostEstimate{
+ multiplyUint64NoOverflow(se.Min, cost.Min),
+ multiplyUint64NoOverflow(se.Max, cost.Max),
+ }
+}
+
+// Union returns a SizeEstimate that encompasses both input SizeEstimates.
+func (se SizeEstimate) Union(size SizeEstimate) SizeEstimate {
+ result := se
+ if size.Min < result.Min {
+ result.Min = size.Min
+ }
+ if size.Max > result.Max {
+ result.Max = size.Max
+ }
+ return result
+}
+
+// CostEstimate represents an estimated cost range and provides add and multiply operations
+// that do not overflow.
+type CostEstimate struct {
+ Min, Max uint64
+}
+
+// Add adds the costs and returns the sum.
+// If add would result in an uint64 overflow for the min or max, the value is set to math.MaxUint64.
+func (ce CostEstimate) Add(cost CostEstimate) CostEstimate {
+ return CostEstimate{
+ addUint64NoOverflow(ce.Min, cost.Min),
+ addUint64NoOverflow(ce.Max, cost.Max),
+ }
+}
+
+// Multiply multiplies by the cost and returns the product.
+// If multiply would result in an uint64 overflow, the result is math.MaxUint64.
+func (ce CostEstimate) Multiply(cost CostEstimate) CostEstimate {
+ return CostEstimate{
+ multiplyUint64NoOverflow(ce.Min, cost.Min),
+ multiplyUint64NoOverflow(ce.Max, cost.Max),
+ }
+}
+
+// MultiplyByCostFactor multiplies a CostEstimate by a cost factor and returns the CostEstimate with the
+// nearest integer of the result, rounded up.
+func (ce CostEstimate) MultiplyByCostFactor(costPerUnit float64) CostEstimate {
+ return CostEstimate{
+ multiplyByCostFactor(ce.Min, costPerUnit),
+ multiplyByCostFactor(ce.Max, costPerUnit),
+ }
+}
+
+// Union returns a CostEstimate that encompasses both input CostEstimates.
+func (ce CostEstimate) Union(size CostEstimate) CostEstimate {
+ result := ce
+ if size.Min < result.Min {
+ result.Min = size.Min
+ }
+ if size.Max > result.Max {
+ result.Max = size.Max
+ }
+ return result
+}
+
+// addUint64NoOverflow adds non-negative ints. If the result exceeds math.MaxUint64, math.MaxUint64
+// is returned.
+func addUint64NoOverflow(x, y uint64) uint64 {
+ if y > 0 && x > math.MaxUint64-y {
+ return math.MaxUint64
+ }
+ return x + y
+}
+
+// multiplyUint64NoOverflow multiplies non-negative ints. If the result exceeds math.MaxUint64, math.MaxUint64
+// is returned.
+func multiplyUint64NoOverflow(x, y uint64) uint64 {
+ if y != 0 && x > math.MaxUint64/y {
+ return math.MaxUint64
+ }
+ return x * y
+}
+
+// multiplyByCostFactor multiplies an integer by a cost factor float and returns the nearest integer value, rounded up.
+func multiplyByCostFactor(x uint64, y float64) uint64 {
+ xFloat := float64(x)
+ if xFloat > 0 && y > 0 && xFloat > math.MaxUint64/y {
+ return math.MaxUint64
+ }
+ ceil := math.Ceil(xFloat * y)
+ if ceil >= doubleTwoTo64 {
+ return math.MaxUint64
+ }
+ return uint64(ceil)
+}
+
+var (
+ selectAndIdentCost = CostEstimate{Min: common.SelectAndIdentCost, Max: common.SelectAndIdentCost}
+ constCost = CostEstimate{Min: common.ConstCost, Max: common.ConstCost}
+
+ createListBaseCost = CostEstimate{Min: common.ListCreateBaseCost, Max: common.ListCreateBaseCost}
+ createMapBaseCost = CostEstimate{Min: common.MapCreateBaseCost, Max: common.MapCreateBaseCost}
+ createMessageBaseCost = CostEstimate{Min: common.StructCreateBaseCost, Max: common.StructCreateBaseCost}
+)
+
+type coster struct {
+ // exprPath maps from Expr Id to field path.
+ exprPath map[int64][]string
+ // iterRanges tracks the iterRange of each iterVar.
+ iterRanges iterRangeScopes
+ // computedSizes tracks the computed sizes of call results.
+ computedSizes map[int64]SizeEstimate
+ checkedAST *ast.AST
+ estimator CostEstimator
+ overloadEstimators map[string]FunctionEstimator
+ // presenceTestCost will be either zero or one, depending on whether has() macros count against cost computations.
+ presenceTestCost CostEstimate
+}
+
+// Use a stack of iterVar -> iterRange Expr Ids to handle shadowed variable names.
+type iterRangeScopes map[string][]int64
+
+func (vs iterRangeScopes) push(varName string, expr ast.Expr) {
+ vs[varName] = append(vs[varName], expr.ID())
+}
+
+func (vs iterRangeScopes) pop(varName string) {
+ varStack := vs[varName]
+ vs[varName] = varStack[:len(varStack)-1]
+}
+
+func (vs iterRangeScopes) peek(varName string) (int64, bool) {
+ varStack := vs[varName]
+ if len(varStack) > 0 {
+ return varStack[len(varStack)-1], true
+ }
+ return 0, false
+}
+
+// CostOption configures flags which affect cost computations.
+type CostOption func(*coster) error
+
+// PresenceTestHasCost determines whether presence testing has a cost of one or zero.
+//
+// Defaults to a presence-test cost of one.
+func PresenceTestHasCost(hasCost bool) CostOption {
+ return func(c *coster) error {
+ if hasCost {
+ c.presenceTestCost = selectAndIdentCost
+ return nil
+ }
+ c.presenceTestCost = CostEstimate{Min: 0, Max: 0}
+ return nil
+ }
+}
+
+// FunctionEstimator provides a CallEstimate given the target and arguments for a specific function-overload pair.
+type FunctionEstimator func(estimator CostEstimator, target *AstNode, args []AstNode) *CallEstimate
+
+// OverloadCostEstimate binds a FunctionEstimator to a specific function overload ID.
+//
+// When an OverloadCostEstimate is provided, it will override the cost calculation of the CostEstimator provided to
+// the Cost() call.
+func OverloadCostEstimate(overloadID string, functionCoster FunctionEstimator) CostOption {
+ return func(c *coster) error {
+ c.overloadEstimators[overloadID] = functionCoster
+ return nil
+ }
+}
+
+// Cost estimates the cost of the parsed and type-checked CEL expression.
+func Cost(checked *ast.AST, estimator CostEstimator, opts ...CostOption) (CostEstimate, error) {
+ c := &coster{
+ checkedAST: checked,
+ estimator: estimator,
+ overloadEstimators: map[string]FunctionEstimator{},
+ exprPath: map[int64][]string{},
+ iterRanges: map[string][]int64{},
+ computedSizes: map[int64]SizeEstimate{},
+ presenceTestCost: CostEstimate{Min: 1, Max: 1},
+ }
+ for _, opt := range opts {
+ err := opt(c)
+ if err != nil {
+ return CostEstimate{}, err
+ }
+ }
+ return c.cost(checked.Expr()), nil
+}
+
+func (c *coster) cost(e ast.Expr) CostEstimate {
+ if e == nil {
+ return CostEstimate{}
+ }
+ var cost CostEstimate
+ switch e.Kind() {
+ case ast.LiteralKind:
+ cost = constCost
+ case ast.IdentKind:
+ cost = c.costIdent(e)
+ case ast.SelectKind:
+ cost = c.costSelect(e)
+ case ast.CallKind:
+ cost = c.costCall(e)
+ case ast.ListKind:
+ cost = c.costCreateList(e)
+ case ast.MapKind:
+ cost = c.costCreateMap(e)
+ case ast.StructKind:
+ cost = c.costCreateStruct(e)
+ case ast.ComprehensionKind:
+ cost = c.costComprehension(e)
+ default:
+ return CostEstimate{}
+ }
+ return cost
+}
+
+func (c *coster) costIdent(e ast.Expr) CostEstimate {
+ identName := e.AsIdent()
+ // build and track the field path
+ if iterRange, ok := c.iterRanges.peek(identName); ok {
+ switch c.checkedAST.GetType(iterRange).Kind() {
+ case types.ListKind:
+ c.addPath(e, append(c.exprPath[iterRange], "@items"))
+ case types.MapKind:
+ c.addPath(e, append(c.exprPath[iterRange], "@keys"))
+ }
+ } else {
+ c.addPath(e, []string{identName})
+ }
+
+ return selectAndIdentCost
+}
+
+func (c *coster) costSelect(e ast.Expr) CostEstimate {
+ sel := e.AsSelect()
+ var sum CostEstimate
+ if sel.IsTestOnly() {
+ // Recurse, but do not add any cost for the qualifier itself.
+ // This is equivalent to how evalTestOnly increments the runtime cost counter
+ // without charging for the qualifier; here the relationship is
+ // reversed (the ident adds the cost).
+ sum = sum.Add(c.presenceTestCost)
+ sum = sum.Add(c.cost(sel.Operand()))
+ return sum
+ }
+ sum = sum.Add(c.cost(sel.Operand()))
+ targetType := c.getType(sel.Operand())
+ switch targetType.Kind() {
+ case types.MapKind, types.StructKind, types.TypeParamKind:
+ sum = sum.Add(selectAndIdentCost)
+ }
+
+ // build and track the field path
+ c.addPath(e, append(c.getPath(sel.Operand()), sel.FieldName()))
+
+ return sum
+}
+
+func (c *coster) costCall(e ast.Expr) CostEstimate {
+ call := e.AsCall()
+ args := call.Args()
+
+ var sum CostEstimate
+
+ argTypes := make([]AstNode, len(args))
+ argCosts := make([]CostEstimate, len(args))
+ for i, arg := range args {
+ argCosts[i] = c.cost(arg)
+ argTypes[i] = c.newAstNode(arg)
+ }
+
+ overloadIDs := c.checkedAST.GetOverloadIDs(e.ID())
+ if len(overloadIDs) == 0 {
+ return CostEstimate{}
+ }
+ var targetType AstNode
+ if call.IsMemberFunction() {
+ sum = sum.Add(c.cost(call.Target()))
+ targetType = c.newAstNode(call.Target())
+ }
+ // Pick a cost estimate range that covers all the overload cost estimation ranges
+ fnCost := CostEstimate{Min: uint64(math.MaxUint64), Max: 0}
+ var resultSize *SizeEstimate
+ for _, overload := range overloadIDs {
+ overloadCost := c.functionCost(call.FunctionName(), overload, &targetType, argTypes, argCosts)
+ fnCost = fnCost.Union(overloadCost.CostEstimate)
+ if overloadCost.ResultSize != nil {
+ if resultSize == nil {
+ resultSize = overloadCost.ResultSize
+ } else {
+ size := resultSize.Union(*overloadCost.ResultSize)
+ resultSize = &size
+ }
+ }
+ // build and track the field path for index operations
+ switch overload {
+ case overloads.IndexList:
+ if len(args) > 0 {
+ c.addPath(e, append(c.getPath(args[0]), "@items"))
+ }
+ case overloads.IndexMap:
+ if len(args) > 0 {
+ c.addPath(e, append(c.getPath(args[0]), "@values"))
+ }
+ }
+ }
+ if resultSize != nil {
+ c.computedSizes[e.ID()] = *resultSize
+ }
+ return sum.Add(fnCost)
+}
+
+func (c *coster) costCreateList(e ast.Expr) CostEstimate {
+ create := e.AsList()
+ var sum CostEstimate
+ for _, e := range create.Elements() {
+ sum = sum.Add(c.cost(e))
+ }
+ return sum.Add(createListBaseCost)
+}
+
+func (c *coster) costCreateMap(e ast.Expr) CostEstimate {
+ mapVal := e.AsMap()
+ var sum CostEstimate
+ for _, ent := range mapVal.Entries() {
+ entry := ent.AsMapEntry()
+ sum = sum.Add(c.cost(entry.Key()))
+ sum = sum.Add(c.cost(entry.Value()))
+ }
+ return sum.Add(createMapBaseCost)
+}
+
+func (c *coster) costCreateStruct(e ast.Expr) CostEstimate {
+ msgVal := e.AsStruct()
+ var sum CostEstimate
+ for _, ent := range msgVal.Fields() {
+ field := ent.AsStructField()
+ sum = sum.Add(c.cost(field.Value()))
+ }
+ return sum.Add(createMessageBaseCost)
+}
+
+func (c *coster) costComprehension(e ast.Expr) CostEstimate {
+ comp := e.AsComprehension()
+ var sum CostEstimate
+ sum = sum.Add(c.cost(comp.IterRange()))
+ sum = sum.Add(c.cost(comp.AccuInit()))
+
+ // Track the iterRange of each IterVar for field path construction
+ c.iterRanges.push(comp.IterVar(), comp.IterRange())
+ loopCost := c.cost(comp.LoopCondition())
+ stepCost := c.cost(comp.LoopStep())
+ c.iterRanges.pop(comp.IterVar())
+ sum = sum.Add(c.cost(comp.Result()))
+ rangeCnt := c.sizeEstimate(c.newAstNode(comp.IterRange()))
+
+ c.computedSizes[e.ID()] = rangeCnt
+
+ rangeCost := rangeCnt.MultiplyByCost(stepCost.Add(loopCost))
+ sum = sum.Add(rangeCost)
+
+ return sum
+}
+
+func (c *coster) sizeEstimate(t AstNode) SizeEstimate {
+ if l := t.ComputedSize(); l != nil {
+ return *l
+ }
+ if l := c.estimator.EstimateSize(t); l != nil {
+ return *l
+ }
+ // Return a size estimate of 1 for scalar types, which have a fixed
+ // size; strings/bytes/more complex objects can be of variable
+ // length.
+ if isScalar(t.Type()) {
+ // TODO: since the logic for size estimation is split between
+ // ComputedSize and isScalar, changing one will likely require changing
+ // the other, so they should be merged in the future if possible
+ return SizeEstimate{Min: 1, Max: 1}
+ }
+ return SizeEstimate{Min: 0, Max: math.MaxUint64}
+}
+
+func (c *coster) functionCost(function, overloadID string, target *AstNode, args []AstNode, argCosts []CostEstimate) CallEstimate {
+ argCostSum := func() CostEstimate {
+ var sum CostEstimate
+ for _, a := range argCosts {
+ sum = sum.Add(a)
+ }
+ return sum
+ }
+ if len(c.overloadEstimators) != 0 {
+ if estimator, found := c.overloadEstimators[overloadID]; found {
+ if est := estimator(c.estimator, target, args); est != nil {
+ callEst := *est
+ return CallEstimate{CostEstimate: callEst.Add(argCostSum()), ResultSize: est.ResultSize}
+ }
+ }
+ }
+ if est := c.estimator.EstimateCallCost(function, overloadID, target, args); est != nil {
+ callEst := *est
+ return CallEstimate{CostEstimate: callEst.Add(argCostSum()), ResultSize: est.ResultSize}
+ }
+ switch overloadID {
+ // O(n) functions
+ case overloads.ExtFormatString:
+ if target != nil {
+ // ResultSize not calculated because we can't bound the max size.
+ return CallEstimate{CostEstimate: c.sizeEstimate(*target).MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum())}
+ }
+ case overloads.StringToBytes:
+ if len(args) == 1 {
+ sz := c.sizeEstimate(args[0])
+ // ResultSize max is when each char converts to 4 bytes.
+ return CallEstimate{CostEstimate: sz.MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum()), ResultSize: &SizeEstimate{Min: sz.Min, Max: sz.Max * 4}}
+ }
+ case overloads.BytesToString:
+ if len(args) == 1 {
+ sz := c.sizeEstimate(args[0])
+ // ResultSize min is when 4 bytes convert to 1 char.
+ return CallEstimate{CostEstimate: sz.MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum()), ResultSize: &SizeEstimate{Min: sz.Min / 4, Max: sz.Max}}
+ }
+ case overloads.ExtQuoteString:
+ if len(args) == 1 {
+ sz := c.sizeEstimate(args[0])
+ // ResultSize max is when each char is escaped. 2 quote chars always added.
+ return CallEstimate{CostEstimate: sz.MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum()), ResultSize: &SizeEstimate{Min: sz.Min + 2, Max: sz.Max*2 + 2}}
+ }
+ case overloads.StartsWithString, overloads.EndsWithString:
+ if len(args) == 1 {
+ return CallEstimate{CostEstimate: c.sizeEstimate(args[0]).MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum())}
+ }
+ case overloads.InList:
+ // If a list is composed entirely of constant values this is O(1), but we don't account for that here.
+ // We just assume all list containment checks are O(n).
+ if len(args) == 2 {
+ return CallEstimate{CostEstimate: c.sizeEstimate(args[1]).MultiplyByCostFactor(1).Add(argCostSum())}
+ }
+ // O(nm) functions
+ case overloads.MatchesString:
+ // https://swtch.com/~rsc/regexp/regexp1.html applies to RE2 implementation supported by CEL
+ if target != nil && len(args) == 1 {
+ // Add one to the string length for purposes of cost calculation to prevent the product of string and regex
+ // from being 0 in the case where the string is empty but the regex is still expensive.
+ strCost := c.sizeEstimate(*target).Add(SizeEstimate{Min: 1, Max: 1}).MultiplyByCostFactor(common.StringTraversalCostFactor)
+ // We don't know how many expressions are in the regex, just the string length (a huge
+ // improvement here would be to somehow get a count of the number of expressions in the regex or
+ // how many states are in the regex state machine and use that to measure regex cost).
+ // For now, we're making a guess that each expression in a regex is typically at least 4 chars
+ // in length.
+ regexCost := c.sizeEstimate(args[0]).MultiplyByCostFactor(common.RegexStringLengthCostFactor)
+ return CallEstimate{CostEstimate: strCost.Multiply(regexCost).Add(argCostSum())}
+ }
+ case overloads.ContainsString:
+ if target != nil && len(args) == 1 {
+ strCost := c.sizeEstimate(*target).MultiplyByCostFactor(common.StringTraversalCostFactor)
+ substrCost := c.sizeEstimate(args[0]).MultiplyByCostFactor(common.StringTraversalCostFactor)
+ return CallEstimate{CostEstimate: strCost.Multiply(substrCost).Add(argCostSum())}
+ }
+ case overloads.LogicalOr, overloads.LogicalAnd:
+ lhs := argCosts[0]
+ rhs := argCosts[1]
+ // min cost is the min of the LHS alone, for short-circuited && or ||
+ argCost := CostEstimate{Min: lhs.Min, Max: lhs.Add(rhs).Max}
+ return CallEstimate{CostEstimate: argCost}
+ case overloads.Conditional:
+ size := c.sizeEstimate(args[1]).Union(c.sizeEstimate(args[2]))
+ conditionalCost := argCosts[0]
+ ifTrueCost := argCosts[1]
+ ifFalseCost := argCosts[2]
+ argCost := conditionalCost.Add(ifTrueCost.Union(ifFalseCost))
+ return CallEstimate{CostEstimate: argCost, ResultSize: &size}
+ case overloads.AddString, overloads.AddBytes, overloads.AddList:
+ if len(args) == 2 {
+ lhsSize := c.sizeEstimate(args[0])
+ rhsSize := c.sizeEstimate(args[1])
+ resultSize := lhsSize.Add(rhsSize)
+ switch overloadID {
+ case overloads.AddList:
+ // list concatenation is O(1), but we handle it here to track size
+ return CallEstimate{CostEstimate: CostEstimate{Min: 1, Max: 1}.Add(argCostSum()), ResultSize: &resultSize}
+ default:
+ return CallEstimate{CostEstimate: resultSize.MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum()), ResultSize: &resultSize}
+ }
+ }
+ case overloads.LessString, overloads.GreaterString, overloads.LessEqualsString, overloads.GreaterEqualsString,
+ overloads.LessBytes, overloads.GreaterBytes, overloads.LessEqualsBytes, overloads.GreaterEqualsBytes,
+ overloads.Equals, overloads.NotEquals:
+ lhsCost := c.sizeEstimate(args[0])
+ rhsCost := c.sizeEstimate(args[1])
+ min := uint64(0)
+ smallestMax := lhsCost.Max
+ if rhsCost.Max < smallestMax {
+ smallestMax = rhsCost.Max
+ }
+ if smallestMax > 0 {
+ min = 1
+ }
+ // equality of 2 scalar values results in a cost of 1
+ return CallEstimate{CostEstimate: CostEstimate{Min: min, Max: smallestMax}.MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum())}
+ }
+ // O(1) functions
+ // See CostTracker.costCall for more details about O(1) cost calculations
+
+ // Benchmarks suggest that most of the other operations take +/- 50% of a base cost unit,
+ // which on an Intel Xeon 2.20GHz CPU is about 50ns.
+ return CallEstimate{CostEstimate: CostEstimate{Min: 1, Max: 1}.Add(argCostSum())}
+}
+
+func (c *coster) getType(e ast.Expr) *types.Type {
+ return c.checkedAST.GetType(e.ID())
+}
+
+func (c *coster) getPath(e ast.Expr) []string {
+ return c.exprPath[e.ID()]
+}
+
+func (c *coster) addPath(e ast.Expr, path []string) {
+ c.exprPath[e.ID()] = path
+}
+
+func (c *coster) newAstNode(e ast.Expr) *astNode {
+ path := c.getPath(e)
+ if len(path) > 0 && path[0] == parser.AccumulatorName {
+ // only provide paths to root vars; omit accumulator vars
+ path = nil
+ }
+ var derivedSize *SizeEstimate
+ if size, ok := c.computedSizes[e.ID()]; ok {
+ derivedSize = &size
+ }
+ return &astNode{
+ path: path,
+ t: c.getType(e),
+ expr: e,
+ derivedSize: derivedSize}
+}
+
+// isScalar returns true if the given type is known to be of a constant size at
+// compile time. isScalar will return false for strings (they are variable-width)
+// in addition to protobuf.Any and protobuf.Value (their size is not knowable at compile time).
+func isScalar(t *types.Type) bool {
+ switch t.Kind() {
+ case types.BoolKind, types.DoubleKind, types.DurationKind, types.IntKind, types.TimestampKind, types.UintKind:
+ return true
+ }
+ return false
+}
+
+var (
+ doubleTwoTo64 = math.Ldexp(1.0, 64)
+)
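
The saturating arithmetic above is what keeps the checker's worst-case cost bounds meaningful: both bounds clamp to math.MaxUint64 rather than wrapping. A minimal, self-contained sketch (the helpers are unexported in the vendored file, so they are re-implemented here purely for illustration) shows the clamping and the round-up behavior of the cost factor:

```go
package main

import (
	"fmt"
	"math"
)

// Re-implementations of the vendored unexported helpers, for illustration only.
func addUint64NoOverflow(x, y uint64) uint64 {
	if y > 0 && x > math.MaxUint64-y {
		return math.MaxUint64 // saturate instead of wrapping
	}
	return x + y
}

func multiplyByCostFactor(x uint64, y float64) uint64 {
	xFloat := float64(x)
	if xFloat > 0 && y > 0 && xFloat > math.MaxUint64/y {
		return math.MaxUint64
	}
	ceil := math.Ceil(xFloat * y)
	if ceil >= math.Ldexp(1.0, 64) { // 2^64, as in the vendored doubleTwoTo64
		return math.MaxUint64
	}
	return uint64(ceil)
}

func main() {
	fmt.Println(addUint64NoOverflow(math.MaxUint64, 1)) // saturates: 18446744073709551615
	fmt.Println(multiplyByCostFactor(10, 0.1))          // ceil(1.0) = 1
	fmt.Println(multiplyByCostFactor(3, 0.25))          // ceil(0.75) rounds up to 1
}
```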
diff --git a/vendor/github.com/google/cel-go/checker/decls/BUILD.bazel b/vendor/github.com/google/cel-go/checker/decls/BUILD.bazel
new file mode 100644
index 000000000..a6b0be292
--- /dev/null
+++ b/vendor/github.com/google/cel-go/checker/decls/BUILD.bazel
@@ -0,0 +1,19 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+package(
+ default_visibility = ["//visibility:public"],
+ licenses = ["notice"], # Apache 2.0
+)
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "decls.go",
+ ],
+ importpath = "github.com/google/cel-go/checker/decls",
+ deps = [
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
+ "@org_golang_google_protobuf//types/known/emptypb:go_default_library",
+ "@org_golang_google_protobuf//types/known/structpb:go_default_library",
+ ],
+)
diff --git a/vendor/github.com/google/cel-go/checker/decls/decls.go b/vendor/github.com/google/cel-go/checker/decls/decls.go
new file mode 100644
index 000000000..c0e5de469
--- /dev/null
+++ b/vendor/github.com/google/cel-go/checker/decls/decls.go
@@ -0,0 +1,237 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package decls provides helpers for creating variable and function declarations.
+package decls
+
+import (
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+)
+
+var (
+ // Error type used to communicate issues during type-checking.
+ Error = &exprpb.Type{
+ TypeKind: &exprpb.Type_Error{
+ Error: &emptypb.Empty{}}}
+
+ // Dyn is a top-type used to represent any value.
+ Dyn = &exprpb.Type{
+ TypeKind: &exprpb.Type_Dyn{
+ Dyn: &emptypb.Empty{}}}
+)
+
+// Commonly used types.
+var (
+ Bool = NewPrimitiveType(exprpb.Type_BOOL)
+ Bytes = NewPrimitiveType(exprpb.Type_BYTES)
+ Double = NewPrimitiveType(exprpb.Type_DOUBLE)
+ Int = NewPrimitiveType(exprpb.Type_INT64)
+ Null = &exprpb.Type{
+ TypeKind: &exprpb.Type_Null{
+ Null: structpb.NullValue_NULL_VALUE}}
+ String = NewPrimitiveType(exprpb.Type_STRING)
+ Uint = NewPrimitiveType(exprpb.Type_UINT64)
+)
+
+// Well-known types.
+// TODO: Replace with an abstract type registry.
+var (
+ Any = NewWellKnownType(exprpb.Type_ANY)
+ Duration = NewWellKnownType(exprpb.Type_DURATION)
+ Timestamp = NewWellKnownType(exprpb.Type_TIMESTAMP)
+)
+
+// NewAbstractType creates an abstract type declaration which references a proto
+// message name and may also include type parameters.
+func NewAbstractType(name string, paramTypes ...*exprpb.Type) *exprpb.Type {
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_AbstractType_{
+ AbstractType: &exprpb.Type_AbstractType{
+ Name: name,
+ ParameterTypes: paramTypes}}}
+}
+
+// NewOptionalType constructs an abstract type indicating that the parameterized type
+// may be contained within the object.
+func NewOptionalType(paramType *exprpb.Type) *exprpb.Type {
+ return NewAbstractType("optional_type", paramType)
+}
+
+// NewFunctionType creates a function invocation contract, typically only used
+// by type-checking steps after overload resolution.
+func NewFunctionType(resultType *exprpb.Type,
+ argTypes ...*exprpb.Type) *exprpb.Type {
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_Function{
+ Function: &exprpb.Type_FunctionType{
+ ResultType: resultType,
+ ArgTypes: argTypes}}}
+}
+
+// NewFunction creates a named function declaration with one or more overloads.
+func NewFunction(name string,
+ overloads ...*exprpb.Decl_FunctionDecl_Overload) *exprpb.Decl {
+ return &exprpb.Decl{
+ Name: name,
+ DeclKind: &exprpb.Decl_Function{
+ Function: &exprpb.Decl_FunctionDecl{
+ Overloads: overloads}}}
+}
+
+// NewIdent creates a named identifier declaration with an optional literal
+// value.
+//
+// Literal values are typically only associated with enum identifiers.
+//
+// Deprecated: Use NewVar or NewConst instead.
+func NewIdent(name string, t *exprpb.Type, v *exprpb.Constant) *exprpb.Decl {
+ return &exprpb.Decl{
+ Name: name,
+ DeclKind: &exprpb.Decl_Ident{
+ Ident: &exprpb.Decl_IdentDecl{
+ Type: t,
+ Value: v}}}
+}
+
+// NewConst creates a constant identifier with a CEL constant literal value.
+func NewConst(name string, t *exprpb.Type, v *exprpb.Constant) *exprpb.Decl {
+ return NewIdent(name, t, v)
+}
+
+// NewVar creates a variable identifier.
+func NewVar(name string, t *exprpb.Type) *exprpb.Decl {
+ return NewIdent(name, t, nil)
+}
+
+// NewInstanceOverload creates an instance function overload contract.
+// The first element of argTypes is the instance.
+func NewInstanceOverload(id string, argTypes []*exprpb.Type,
+ resultType *exprpb.Type) *exprpb.Decl_FunctionDecl_Overload {
+ return &exprpb.Decl_FunctionDecl_Overload{
+ OverloadId: id,
+ ResultType: resultType,
+ Params: argTypes,
+ IsInstanceFunction: true}
+}
+
+// NewListType generates a new list type with elements of a certain type.
+func NewListType(elem *exprpb.Type) *exprpb.Type {
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_ListType_{
+ ListType: &exprpb.Type_ListType{
+ ElemType: elem}}}
+}
+
+// NewMapType generates a new map type with typed keys and values.
+func NewMapType(key *exprpb.Type, value *exprpb.Type) *exprpb.Type {
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_MapType_{
+ MapType: &exprpb.Type_MapType{
+ KeyType: key,
+ ValueType: value}}}
+}
+
+// NewObjectType creates an object type for a qualified type name.
+func NewObjectType(typeName string) *exprpb.Type {
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_MessageType{
+ MessageType: typeName}}
+}
+
+// NewOverload creates a function overload declaration which contains a unique
+// overload id as well as the expected argument and result types. Overloads
+// must be aggregated within a Function declaration.
+func NewOverload(id string, argTypes []*exprpb.Type,
+ resultType *exprpb.Type) *exprpb.Decl_FunctionDecl_Overload {
+ return &exprpb.Decl_FunctionDecl_Overload{
+ OverloadId: id,
+ ResultType: resultType,
+ Params: argTypes,
+ IsInstanceFunction: false}
+}
+
+// NewParameterizedInstanceOverload creates a parametric function instance overload type.
+func NewParameterizedInstanceOverload(id string,
+ argTypes []*exprpb.Type,
+ resultType *exprpb.Type,
+ typeParams []string) *exprpb.Decl_FunctionDecl_Overload {
+ return &exprpb.Decl_FunctionDecl_Overload{
+ OverloadId: id,
+ ResultType: resultType,
+ Params: argTypes,
+ TypeParams: typeParams,
+ IsInstanceFunction: true}
+}
+
+// NewParameterizedOverload creates a parametric function overload type.
+func NewParameterizedOverload(id string,
+ argTypes []*exprpb.Type,
+ resultType *exprpb.Type,
+ typeParams []string) *exprpb.Decl_FunctionDecl_Overload {
+ return &exprpb.Decl_FunctionDecl_Overload{
+ OverloadId: id,
+ ResultType: resultType,
+ Params: argTypes,
+ TypeParams: typeParams,
+ IsInstanceFunction: false}
+}
+
+// NewPrimitiveType creates a type for a primitive value. See the var declarations
+// for Int, Uint, etc.
+func NewPrimitiveType(primitive exprpb.Type_PrimitiveType) *exprpb.Type {
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_Primitive{
+ Primitive: primitive}}
+}
+
+// NewTypeType creates a new type designating a type.
+func NewTypeType(nested *exprpb.Type) *exprpb.Type {
+ if nested == nil {
+ // must set the nested field for a valid oneof option
+ nested = &exprpb.Type{}
+ }
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_Type{
+ Type: nested}}
+}
+
+// NewTypeParamType creates a type corresponding to a named, contextual parameter.
+func NewTypeParamType(name string) *exprpb.Type {
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_TypeParam{
+ TypeParam: name}}
+}
+
+// NewWellKnownType creates a type corresponding to a protobuf well-known type
+// value.
+func NewWellKnownType(wellKnown exprpb.Type_WellKnownType) *exprpb.Type {
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_WellKnown{
+ WellKnown: wellKnown}}
+}
+
+// NewWrapperType creates a wrapped primitive type instance. Wrapped types
+// are roughly equivalent to a nullable or optionally valued type.
+func NewWrapperType(wrapped *exprpb.Type) *exprpb.Type {
+ primitive := wrapped.GetPrimitive()
+ if primitive == exprpb.Type_PRIMITIVE_TYPE_UNSPECIFIED {
+ // TODO: return an error
+ panic("Wrapped type must be a primitive")
+ }
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_Wrapper{
+ Wrapper: primitive}}
+}
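
The decls helpers above assemble exprpb.Type and exprpb.Decl protos by hand. A brief sketch of how a parameterized overload and a variable might be declared with them; the overload ID "size_list" is an arbitrary illustrative name, not an assertion about CEL's real overload table:

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/checker/decls"
	exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)

func main() {
	// A parameterized overload: size(list(T)) -> int.
	sizeFn := decls.NewFunction("size",
		decls.NewParameterizedOverload("size_list",
			[]*exprpb.Type{decls.NewListType(decls.NewTypeParamType("T"))},
			decls.Int,
			[]string{"T"}))

	// A simple string variable declaration.
	nameVar := decls.NewVar("name", decls.String)

	fmt.Println(sizeFn.GetName(), nameVar.GetName()) // size name
}
```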
diff --git a/vendor/github.com/google/cel-go/checker/env.go b/vendor/github.com/google/cel-go/checker/env.go
new file mode 100644
index 000000000..d5ac05014
--- /dev/null
+++ b/vendor/github.com/google/cel-go/checker/env.go
@@ -0,0 +1,284 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package checker
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/google/cel-go/common/containers"
+ "github.com/google/cel-go/common/decls"
+ "github.com/google/cel-go/common/overloads"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/parser"
+)
+
+type aggregateLiteralElementType int
+
+const (
+ dynElementType aggregateLiteralElementType = iota
+ homogenousElementType aggregateLiteralElementType = 1 << iota
+)
+
+var (
+ crossTypeNumericComparisonOverloads = map[string]struct{}{
+ // double <-> int | uint
+ overloads.LessDoubleInt64: {},
+ overloads.LessDoubleUint64: {},
+ overloads.LessEqualsDoubleInt64: {},
+ overloads.LessEqualsDoubleUint64: {},
+ overloads.GreaterDoubleInt64: {},
+ overloads.GreaterDoubleUint64: {},
+ overloads.GreaterEqualsDoubleInt64: {},
+ overloads.GreaterEqualsDoubleUint64: {},
+ // int <-> double | uint
+ overloads.LessInt64Double: {},
+ overloads.LessInt64Uint64: {},
+ overloads.LessEqualsInt64Double: {},
+ overloads.LessEqualsInt64Uint64: {},
+ overloads.GreaterInt64Double: {},
+ overloads.GreaterInt64Uint64: {},
+ overloads.GreaterEqualsInt64Double: {},
+ overloads.GreaterEqualsInt64Uint64: {},
+ // uint <-> double | int
+ overloads.LessUint64Double: {},
+ overloads.LessUint64Int64: {},
+ overloads.LessEqualsUint64Double: {},
+ overloads.LessEqualsUint64Int64: {},
+ overloads.GreaterUint64Double: {},
+ overloads.GreaterUint64Int64: {},
+ overloads.GreaterEqualsUint64Double: {},
+ overloads.GreaterEqualsUint64Int64: {},
+ }
+)
+
+// Env is the environment for type checking.
+//
+// The Env is composed of a container, type provider, declarations, and other related objects
+// which can be used to assist with type-checking.
+type Env struct {
+ container *containers.Container
+ provider types.Provider
+ declarations *Scopes
+ aggLitElemType aggregateLiteralElementType
+ filteredOverloadIDs map[string]struct{}
+}
+
+// NewEnv returns a new *Env with the given parameters.
+func NewEnv(container *containers.Container, provider types.Provider, opts ...Option) (*Env, error) {
+ declarations := newScopes()
+ declarations.Push()
+
+ envOptions := &options{}
+ for _, opt := range opts {
+ if err := opt(envOptions); err != nil {
+ return nil, err
+ }
+ }
+ aggLitElemType := dynElementType
+ if envOptions.homogeneousAggregateLiterals {
+ aggLitElemType = homogenousElementType
+ }
+ filteredOverloadIDs := crossTypeNumericComparisonOverloads
+ if envOptions.crossTypeNumericComparisons {
+ filteredOverloadIDs = make(map[string]struct{})
+ }
+ if envOptions.validatedDeclarations != nil {
+ declarations = envOptions.validatedDeclarations.Copy()
+ }
+ return &Env{
+ container: container,
+ provider: provider,
+ declarations: declarations,
+ aggLitElemType: aggLitElemType,
+ filteredOverloadIDs: filteredOverloadIDs,
+ }, nil
+}
+
+// AddIdents configures the checker with a list of variable declarations.
+//
+// If there are overlapping declarations, the method will error.
+func (e *Env) AddIdents(declarations ...*decls.VariableDecl) error {
+ errMsgs := make([]errorMsg, 0)
+ for _, d := range declarations {
+ errMsgs = append(errMsgs, e.addIdent(d))
+ }
+ return formatError(errMsgs)
+}
+
+// AddFunctions configures the checker with a list of function declarations.
+//
+// If there are overlapping declarations, the method will error.
+func (e *Env) AddFunctions(declarations ...*decls.FunctionDecl) error {
+ errMsgs := make([]errorMsg, 0)
+ for _, d := range declarations {
+ errMsgs = append(errMsgs, e.setFunction(d)...)
+ }
+ return formatError(errMsgs)
+}
+
+// LookupIdent returns the variable declaration for the given name in the Env.
+// Returns nil if no such identifier is found in the Env.
+func (e *Env) LookupIdent(name string) *decls.VariableDecl {
+ for _, candidate := range e.container.ResolveCandidateNames(name) {
+ if ident := e.declarations.FindIdent(candidate); ident != nil {
+ return ident
+ }
+
+ // Next try to import the name as a reference to a message type. If found,
+ // the declaration is added to the outermost (global) scope of the
+ // environment, so subsequent lookups are faster.
+ if t, found := e.provider.FindStructType(candidate); found {
+ decl := decls.NewVariable(candidate, t)
+ e.declarations.AddIdent(decl)
+ return decl
+ }
+
+ if i, found := e.provider.FindIdent(candidate); found {
+ if t, ok := i.(*types.Type); ok {
+ decl := decls.NewVariable(candidate, types.NewTypeTypeWithParam(t))
+ e.declarations.AddIdent(decl)
+ return decl
+ }
+ }
+
+ // Next try to import this as an enum value by splitting the name into a type prefix and
+ // the enum value name.
+ if enumValue := e.provider.EnumValue(candidate); enumValue.Type() != types.ErrType {
+ decl := decls.NewConstant(candidate, types.IntType, enumValue)
+ e.declarations.AddIdent(decl)
+ return decl
+ }
+ }
+ return nil
+}
+
+// LookupFunction returns the function declaration for the given name in the Env.
+// Returns nil if no such function is found in the Env.
+func (e *Env) LookupFunction(name string) *decls.FunctionDecl {
+ for _, candidate := range e.container.ResolveCandidateNames(name) {
+ if fn := e.declarations.FindFunction(candidate); fn != nil {
+ return fn
+ }
+ }
+ return nil
+}
+
+// setFunction adds the function Decl to the Env.
+// Adds a function decl if one doesn't already exist, then adds all overloads from the Decl.
+// If an overload overlaps with an existing overload, adds to the errors in the Env instead.
+func (e *Env) setFunction(fn *decls.FunctionDecl) []errorMsg {
+ errMsgs := make([]errorMsg, 0)
+ current := e.declarations.FindFunction(fn.Name())
+ if current != nil {
+ var err error
+ current, err = current.Merge(fn)
+ if err != nil {
+ return append(errMsgs, errorMsg(err.Error()))
+ }
+ } else {
+ current = fn
+ }
+ for _, overload := range current.OverloadDecls() {
+ for _, macro := range parser.AllMacros {
+ if macro.Function() == current.Name() &&
+ macro.IsReceiverStyle() == overload.IsMemberFunction() &&
+ macro.ArgCount() == len(overload.ArgTypes()) {
+ errMsgs = append(errMsgs, overlappingMacroError(current.Name(), macro.ArgCount()))
+ }
+ }
+ if len(errMsgs) > 0 {
+ return errMsgs
+ }
+ }
+ e.declarations.SetFunction(current)
+ return errMsgs
+}
+
+// addIdent adds the Decl to the declarations in the Env.
+// Returns a non-empty errorMsg if the identifier is already declared in the scope.
+func (e *Env) addIdent(decl *decls.VariableDecl) errorMsg {
+ current := e.declarations.FindIdentInScope(decl.Name())
+ if current != nil {
+ if current.DeclarationIsEquivalent(decl) {
+ return ""
+ }
+ return overlappingIdentifierError(decl.Name())
+ }
+ e.declarations.AddIdent(decl)
+ return ""
+}
+
+// isOverloadDisabled returns whether the overloadID is disabled in the current environment.
+func (e *Env) isOverloadDisabled(overloadID string) bool {
+ _, found := e.filteredOverloadIDs[overloadID]
+ return found
+}
+
+// validatedDeclarations returns a reference to the validated variable and function declaration scope stack.
+// The returned value must be copied before use.
+func (e *Env) validatedDeclarations() *Scopes {
+ return e.declarations
+}
+
+// enterScope creates a new Env instance with a new innermost declaration scope.
+func (e *Env) enterScope() *Env {
+ childDecls := e.declarations.Push()
+ return &Env{
+ declarations: childDecls,
+ container: e.container,
+ provider: e.provider,
+ aggLitElemType: e.aggLitElemType,
+ }
+}
+
+// exitScope creates a new Env instance with the nearest outer declaration scope.
+func (e *Env) exitScope() *Env {
+ parentDecls := e.declarations.Pop()
+ return &Env{
+ declarations: parentDecls,
+ container: e.container,
+ provider: e.provider,
+ aggLitElemType: e.aggLitElemType,
+ }
+}
+
+// errorMsg is a string type meant to represent error-based return values which
+// may be accumulated into an error at a later point in execution.
+type errorMsg string
+
+func overlappingIdentifierError(name string) errorMsg {
+ return errorMsg(fmt.Sprintf("overlapping identifier for name '%s'", name))
+}
+
+func overlappingMacroError(name string, argCount int) errorMsg {
+ return errorMsg(fmt.Sprintf(
+ "overlapping macro for name '%s' with %d args", name, argCount))
+}
+
+func formatError(errMsgs []errorMsg) error {
+ errStrs := make([]string, 0)
+ if len(errMsgs) > 0 {
+ for i := 0; i < len(errMsgs); i++ {
+ if errMsgs[i] != "" {
+ errStrs = append(errStrs, string(errMsgs[i]))
+ }
+ }
+ }
+ if len(errStrs) > 0 {
+ return fmt.Errorf("%s", strings.Join(errStrs, "\n"))
+ }
+ return nil
+}
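
Putting the Env pieces together: a hedged sketch of constructing a checker environment and registering a variable. It assumes containers.DefaultContainer, types.NewRegistry, types.IntType, and decls.NewVariable from cel-go's common packages; only NewEnv, AddIdents, LookupIdent, and the CrossTypeNumericComparisons option are taken from the code above.

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/checker"
	"github.com/google/cel-go/common/containers"
	"github.com/google/cel-go/common/decls"
	"github.com/google/cel-go/common/types"
)

func main() {
	reg, err := types.NewRegistry()
	if err != nil {
		panic(err)
	}
	env, err := checker.NewEnv(containers.DefaultContainer, reg,
		checker.CrossTypeNumericComparisons(true))
	if err != nil {
		panic(err)
	}
	// AddIdents errors on overlapping declarations instead of silently shadowing.
	if err := env.AddIdents(decls.NewVariable("request_size", types.IntType)); err != nil {
		panic(err)
	}
	fmt.Println(env.LookupIdent("request_size").Name()) // request_size
}
```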
diff --git a/vendor/github.com/google/cel-go/checker/errors.go b/vendor/github.com/google/cel-go/checker/errors.go
new file mode 100644
index 000000000..8b3bf0b8b
--- /dev/null
+++ b/vendor/github.com/google/cel-go/checker/errors.go
@@ -0,0 +1,88 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package checker
+
+import (
+ "github.com/google/cel-go/common"
+ "github.com/google/cel-go/common/ast"
+ "github.com/google/cel-go/common/types"
+)
+
+// typeErrors is a specialization of Errors.
+type typeErrors struct {
+ errs *common.Errors
+}
+
+func (e *typeErrors) fieldTypeMismatch(id int64, l common.Location, name string, field, value *types.Type) {
+ e.errs.ReportErrorAtID(id, l, "expected type of field '%s' is '%s' but provided type is '%s'",
+ name, FormatCELType(field), FormatCELType(value))
+}
+
+func (e *typeErrors) incompatibleType(id int64, l common.Location, ex ast.Expr, prev, next *types.Type) {
+ e.errs.ReportErrorAtID(id, l,
+ "incompatible type already exists for expression: %v(%d) old:%v, new:%v", ex, ex.ID(), prev, next)
+}
+
+func (e *typeErrors) noMatchingOverload(id int64, l common.Location, name string, args []*types.Type, isInstance bool) {
+ signature := formatFunctionDeclType(nil, args, isInstance)
+ e.errs.ReportErrorAtID(id, l, "found no matching overload for '%s' applied to '%s'", name, signature)
+}
+
+func (e *typeErrors) notAComprehensionRange(id int64, l common.Location, t *types.Type) {
+ e.errs.ReportErrorAtID(id, l, "expression of type '%s' cannot be range of a comprehension (must be list, map, or dynamic)",
+ FormatCELType(t))
+}
+
+func (e *typeErrors) notAnOptionalFieldSelection(id int64, l common.Location, field ast.Expr) {
+ e.errs.ReportErrorAtID(id, l, "unsupported optional field selection: %v", field)
+}
+
+func (e *typeErrors) notAType(id int64, l common.Location, typeName string) {
+ e.errs.ReportErrorAtID(id, l, "'%s' is not a type", typeName)
+}
+
+func (e *typeErrors) notAMessageType(id int64, l common.Location, typeName string) {
+ e.errs.ReportErrorAtID(id, l, "'%s' is not a message type", typeName)
+}
+
+func (e *typeErrors) referenceRedefinition(id int64, l common.Location, ex ast.Expr, prev, next *ast.ReferenceInfo) {
+ e.errs.ReportErrorAtID(id, l,
+ "reference already exists for expression: %v(%d) old:%v, new:%v", ex, ex.ID(), prev, next)
+}
+
+func (e *typeErrors) typeDoesNotSupportFieldSelection(id int64, l common.Location, t *types.Type) {
+ e.errs.ReportErrorAtID(id, l, "type '%s' does not support field selection", FormatCELType(t))
+}
+
+func (e *typeErrors) typeMismatch(id int64, l common.Location, expected, actual *types.Type) {
+ e.errs.ReportErrorAtID(id, l, "expected type '%s' but found '%s'",
+ FormatCELType(expected), FormatCELType(actual))
+}
+
+func (e *typeErrors) undefinedField(id int64, l common.Location, field string) {
+ e.errs.ReportErrorAtID(id, l, "undefined field '%s'", field)
+}
+
+func (e *typeErrors) undeclaredReference(id int64, l common.Location, container string, name string) {
+ e.errs.ReportErrorAtID(id, l, "undeclared reference to '%s' (in container '%s')", name, container)
+}
+
+func (e *typeErrors) unexpectedFailedResolution(id int64, l common.Location, typeName string) {
+ e.errs.ReportErrorAtID(id, l, "unexpected failed resolution of '%s'", typeName)
+}
+
+func (e *typeErrors) unexpectedASTType(id int64, l common.Location, kind, typeName string) {
+ e.errs.ReportErrorAtID(id, l, "unexpected %s type: %v", kind, typeName)
+}
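
All of these helpers funnel reports into a shared common.Errors keyed by expression ID. Since typeErrors is unexported, a demonstration has to live inside package checker; this sketch assumes common.NewTextSource, common.NewErrors, common.NoLocation, and Errors.ToDisplayString from cel-go's common package.

```go
package checker

import (
	"fmt"

	"github.com/google/cel-go/common"
)

// Sketch only: typeErrors is unexported, so this would live inside
// package checker, e.g. in a _test.go file.
func exampleTypeErrors() {
	src := common.NewTextSource("a.b")
	te := &typeErrors{errs: common.NewErrors(src)}
	te.undeclaredReference(1, common.NoLocation, "mycontainer", "a")
	// Render everything reported so far against the source.
	fmt.Println(te.errs.ToDisplayString())
}
```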
diff --git a/vendor/github.com/google/cel-go/checker/format.go b/vendor/github.com/google/cel-go/checker/format.go
new file mode 100644
index 000000000..95842905e
--- /dev/null
+++ b/vendor/github.com/google/cel-go/checker/format.go
@@ -0,0 +1,216 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package checker
+
+import (
+ "fmt"
+ "strings"
+
+ chkdecls "github.com/google/cel-go/checker/decls"
+ "github.com/google/cel-go/common/types"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+const (
+ kindUnknown = iota + 1
+ kindError
+ kindFunction
+ kindDyn
+ kindPrimitive
+ kindWellKnown
+ kindWrapper
+ kindNull
+ kindAbstract
+ kindType
+ kindList
+ kindMap
+ kindObject
+ kindTypeParam
+)
+
+// FormatCheckedType converts a type message into a string representation.
+func FormatCheckedType(t *exprpb.Type) string {
+ switch kindOf(t) {
+ case kindDyn:
+ return "dyn"
+ case kindFunction:
+ return formatFunctionExprType(t.GetFunction().GetResultType(),
+ t.GetFunction().GetArgTypes(),
+ false)
+ case kindList:
+ return fmt.Sprintf("list(%s)", FormatCheckedType(t.GetListType().GetElemType()))
+ case kindObject:
+ return t.GetMessageType()
+ case kindMap:
+ return fmt.Sprintf("map(%s, %s)",
+ FormatCheckedType(t.GetMapType().GetKeyType()),
+ FormatCheckedType(t.GetMapType().GetValueType()))
+ case kindNull:
+ return "null"
+ case kindPrimitive:
+ switch t.GetPrimitive() {
+ case exprpb.Type_UINT64:
+ return "uint"
+ case exprpb.Type_INT64:
+ return "int"
+ }
+ return strings.Trim(strings.ToLower(t.GetPrimitive().String()), " ")
+ case kindType:
+ if t.GetType() == nil || t.GetType().GetTypeKind() == nil {
+ return "type"
+ }
+ return fmt.Sprintf("type(%s)", FormatCheckedType(t.GetType()))
+ case kindWellKnown:
+ switch t.GetWellKnown() {
+ case exprpb.Type_ANY:
+ return "any"
+ case exprpb.Type_DURATION:
+ return "duration"
+ case exprpb.Type_TIMESTAMP:
+ return "timestamp"
+ }
+ case kindWrapper:
+ return fmt.Sprintf("wrapper(%s)",
+ FormatCheckedType(chkdecls.NewPrimitiveType(t.GetWrapper())))
+ case kindError:
+ return "!error!"
+ case kindTypeParam:
+ return t.GetTypeParam()
+ case kindAbstract:
+ at := t.GetAbstractType()
+ params := at.GetParameterTypes()
+ paramStrs := make([]string, len(params))
+ for i, p := range params {
+ paramStrs[i] = FormatCheckedType(p)
+ }
+ return fmt.Sprintf("%s(%s)", at.GetName(), strings.Join(paramStrs, ", "))
+ }
+ return t.String()
+}
+
+type formatter func(any) string
+
+// FormatCELType formats a types.Type value to a string representation.
+//
+// The type formatting is identical to FormatCheckedType.
+func FormatCELType(t any) string {
+ dt := t.(*types.Type)
+ switch dt.Kind() {
+ case types.AnyKind:
+ return "any"
+ case types.DurationKind:
+ return "duration"
+ case types.ErrorKind:
+ return "!error!"
+ case types.NullTypeKind:
+ return "null"
+ case types.TimestampKind:
+ return "timestamp"
+ case types.TypeParamKind:
+ return dt.TypeName()
+ case types.OpaqueKind:
+ if dt.TypeName() == "function" {
+ // There is no explicit function type in the new types representation, so information like
+ // whether the function is a member function is absent.
+ return formatFunctionDeclType(dt.Parameters()[0], dt.Parameters()[1:], false)
+ }
+ case types.UnspecifiedKind:
+ return ""
+ }
+ if len(dt.Parameters()) == 0 {
+ return dt.DeclaredTypeName()
+ }
+ paramTypeNames := make([]string, 0, len(dt.Parameters()))
+ for _, p := range dt.Parameters() {
+ paramTypeNames = append(paramTypeNames, FormatCELType(p))
+ }
+ return fmt.Sprintf("%s(%s)", dt.TypeName(), strings.Join(paramTypeNames, ", "))
+}
+
+func formatExprType(t any) string {
+ if t == nil {
+ return ""
+ }
+ return FormatCheckedType(t.(*exprpb.Type))
+}
+
+func formatFunctionExprType(resultType *exprpb.Type, argTypes []*exprpb.Type, isInstance bool) string {
+ return formatFunctionInternal[*exprpb.Type](resultType, argTypes, isInstance, formatExprType)
+}
+
+func formatFunctionDeclType(resultType *types.Type, argTypes []*types.Type, isInstance bool) string {
+ return formatFunctionInternal[*types.Type](resultType, argTypes, isInstance, FormatCELType)
+}
+
+func formatFunctionInternal[T any](resultType T, argTypes []T, isInstance bool, format formatter) string {
+ result := ""
+ if isInstance {
+ target := argTypes[0]
+ argTypes = argTypes[1:]
+ result += format(target)
+ result += "."
+ }
+ result += "("
+ for i, arg := range argTypes {
+ if i > 0 {
+ result += ", "
+ }
+ result += format(arg)
+ }
+ result += ")"
+ rt := format(resultType)
+ if rt != "" {
+ result += " -> "
+ result += rt
+ }
+ return result
+}
+
+// kindOf returns the kind of the type as defined in the checked.proto.
+func kindOf(t *exprpb.Type) int {
+ if t == nil || t.TypeKind == nil {
+ return kindUnknown
+ }
+ switch t.GetTypeKind().(type) {
+ case *exprpb.Type_Error:
+ return kindError
+ case *exprpb.Type_Function:
+ return kindFunction
+ case *exprpb.Type_Dyn:
+ return kindDyn
+ case *exprpb.Type_Primitive:
+ return kindPrimitive
+ case *exprpb.Type_WellKnown:
+ return kindWellKnown
+ case *exprpb.Type_Wrapper:
+ return kindWrapper
+ case *exprpb.Type_Null:
+ return kindNull
+ case *exprpb.Type_Type:
+ return kindType
+ case *exprpb.Type_ListType_:
+ return kindList
+ case *exprpb.Type_MapType_:
+ return kindMap
+ case *exprpb.Type_MessageType:
+ return kindObject
+ case *exprpb.Type_TypeParam:
+ return kindTypeParam
+ case *exprpb.Type_AbstractType_:
+ return kindAbstract
+ }
+ return kindUnknown
+}
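
FormatCheckedType renders proto types in CEL's surface syntax. A small sketch combining it with the proto helpers from checker/decls vendored earlier in this change:

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/checker"
	chkdecls "github.com/google/cel-go/checker/decls"
)

func main() {
	// map(string, list(int)) built from the decls helpers.
	t := chkdecls.NewMapType(chkdecls.String, chkdecls.NewListType(chkdecls.Int))
	fmt.Println(checker.FormatCheckedType(t)) // map(string, list(int))
}
```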
diff --git a/vendor/github.com/google/cel-go/checker/mapping.go b/vendor/github.com/google/cel-go/checker/mapping.go
new file mode 100644
index 000000000..8163a908a
--- /dev/null
+++ b/vendor/github.com/google/cel-go/checker/mapping.go
@@ -0,0 +1,49 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package checker
+
+import (
+ "github.com/google/cel-go/common/types"
+)
+
+type mapping struct {
+ mapping map[string]*types.Type
+}
+
+func newMapping() *mapping {
+ return &mapping{
+ mapping: make(map[string]*types.Type),
+ }
+}
+
+func (m *mapping) add(from, to *types.Type) {
+ m.mapping[FormatCELType(from)] = to
+}
+
+func (m *mapping) find(from *types.Type) (*types.Type, bool) {
+ if r, found := m.mapping[FormatCELType(from)]; found {
+ return r, found
+ }
+ return nil, false
+}
+
+func (m *mapping) copy() *mapping {
+ c := newMapping()
+
+ for k, v := range m.mapping {
+ c.mapping[k] = v
+ }
+ return c
+}
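
The mapping keys substitutions by FormatCELType(from), so two structurally identical types resolve to the same entry. A package-internal sketch (mapping is unexported) assuming types.NewTypeParamType and types.StringType from cel-go's common/types:

```go
package checker

import (
	"fmt"

	"github.com/google/cel-go/common/types"
)

// Sketch only: mapping is unexported, so this belongs inside package checker.
func exampleMapping() {
	m := newMapping()
	m.add(types.NewTypeParamType("T"), types.StringType)
	// Lookup uses FormatCELType(from) as the key, so a fresh but
	// structurally identical type parameter still finds the substitution.
	if sub, found := m.find(types.NewTypeParamType("T")); found {
		fmt.Println(sub.TypeName()) // string
	}
}
```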
diff --git a/vendor/github.com/google/cel-go/checker/options.go b/vendor/github.com/google/cel-go/checker/options.go
new file mode 100644
index 000000000..0560c3813
--- /dev/null
+++ b/vendor/github.com/google/cel-go/checker/options.go
@@ -0,0 +1,42 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package checker
+
+type options struct {
+ crossTypeNumericComparisons bool
+ homogeneousAggregateLiterals bool
+ validatedDeclarations *Scopes
+}
+
+// Option is a functional option for configuring the type-checker
+type Option func(*options) error
+
+// CrossTypeNumericComparisons toggles type-checker support for numeric comparisons across types.
+// See https://github.com/google/cel-spec/wiki/proposal-210 for more details.
+func CrossTypeNumericComparisons(enabled bool) Option {
+ return func(opts *options) error {
+ opts.crossTypeNumericComparisons = enabled
+ return nil
+ }
+}
+
+// ValidatedDeclarations provides a reference to validated declarations which will be copied
+// into new checker instances.
+func ValidatedDeclarations(env *Env) Option {
+ return func(opts *options) error {
+ opts.validatedDeclarations = env.validatedDeclarations()
+ return nil
+ }
+}
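
The ValidatedDeclarations option exists so that one fully populated Env can seed others without re-validating every declaration. A hedged sketch of that reuse, with the same external assumptions as the NewEnv example above:

```go
package main

import (
	"github.com/google/cel-go/checker"
	"github.com/google/cel-go/common/containers"
	"github.com/google/cel-go/common/types"
)

func main() {
	reg, _ := types.NewRegistry()
	base, _ := checker.NewEnv(containers.DefaultContainer, reg)
	// ... populate base with AddIdents / AddFunctions ...

	// Derived environments copy the already-validated declaration scopes
	// (via Scopes.Copy) instead of re-validating from scratch.
	derived, _ := checker.NewEnv(containers.DefaultContainer, reg,
		checker.ValidatedDeclarations(base))
	_ = derived
}
```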
diff --git a/vendor/github.com/google/cel-go/checker/printer.go b/vendor/github.com/google/cel-go/checker/printer.go
new file mode 100644
index 000000000..7a3984f02
--- /dev/null
+++ b/vendor/github.com/google/cel-go/checker/printer.go
@@ -0,0 +1,74 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package checker
+
+import (
+ "sort"
+
+ "github.com/google/cel-go/common/ast"
+ "github.com/google/cel-go/common/debug"
+)
+
+type semanticAdorner struct {
+ checked *ast.AST
+}
+
+var _ debug.Adorner = &semanticAdorner{}
+
+func (a *semanticAdorner) GetMetadata(elem any) string {
+ result := ""
+ e, isExpr := elem.(ast.Expr)
+ if !isExpr {
+ return result
+ }
+ t := a.checked.TypeMap()[e.ID()]
+ if t != nil {
+ result += "~"
+ result += FormatCELType(t)
+ }
+
+ switch e.Kind() {
+ case ast.IdentKind,
+ ast.CallKind,
+ ast.ListKind,
+ ast.StructKind,
+ ast.SelectKind:
+ if ref, found := a.checked.ReferenceMap()[e.ID()]; found {
+ if len(ref.OverloadIDs) == 0 {
+ result += "^" + ref.Name
+ } else {
+ sort.Strings(ref.OverloadIDs)
+ for i, overload := range ref.OverloadIDs {
+ if i == 0 {
+ result += "^"
+ } else {
+ result += "|"
+ }
+ result += overload
+ }
+ }
+ }
+ }
+
+ return result
+}
+
+// Print returns a string representation of the Expr message,
+// annotated with types from the CheckedExpr. The Expr must
+// be a sub-expression embedded in the CheckedExpr.
+func Print(e ast.Expr, checked *ast.AST) string {
+ a := &semanticAdorner{checked: checked}
+ return debug.ToAdornedDebugString(e, a)
+}
diff --git a/vendor/github.com/google/cel-go/checker/scopes.go b/vendor/github.com/google/cel-go/checker/scopes.go
new file mode 100644
index 000000000..8bb73ddb6
--- /dev/null
+++ b/vendor/github.com/google/cel-go/checker/scopes.go
@@ -0,0 +1,147 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package checker
+
+import (
+ "github.com/google/cel-go/common/decls"
+)
+
+// Scopes represents nested Decl sets where the Scopes value contains a Group containing all
+// identifiers in scope and an optional parent representing outer scopes.
+// Each Groups value is a mapping of names to Decls in the ident and function namespaces.
+// Lookups are performed such that bindings in inner scopes shadow those in outer scopes.
+type Scopes struct {
+ parent *Scopes
+ scopes *Group
+}
+
+// newScopes creates a new, empty Scopes.
+// Some operations can't be safely performed until a Group is added with Push.
+func newScopes() *Scopes {
+ return &Scopes{
+ scopes: newGroup(),
+ }
+}
+
+// Copy creates a copy of the current Scopes values, including a copy of its parent if non-nil.
+func (s *Scopes) Copy() *Scopes {
+ cpy := newScopes()
+ if s == nil {
+ return cpy
+ }
+ if s.parent != nil {
+ cpy.parent = s.parent.Copy()
+ }
+ cpy.scopes = s.scopes.copy()
+ return cpy
+}
+
+// Push creates a new Scopes value which references the current Scope as its parent.
+func (s *Scopes) Push() *Scopes {
+ return &Scopes{
+ parent: s,
+ scopes: newGroup(),
+ }
+}
+
+// Pop returns the parent Scopes value for the current scope, or the current scope if the parent
+// is nil.
+func (s *Scopes) Pop() *Scopes {
+ if s.parent != nil {
+ return s.parent
+ }
+ // TODO: Consider whether this should be an error / panic.
+ return s
+}
+
+// AddIdent adds the ident Decl in the current scope.
+// Note: If the name collides with an existing identifier in the scope, the Decl is overwritten.
+func (s *Scopes) AddIdent(decl *decls.VariableDecl) {
+ s.scopes.idents[decl.Name()] = decl
+}
+
+// FindIdent finds the first ident Decl with a matching name in Scopes, or nil if one cannot be
+// found.
+// Note: The search is performed from innermost to outermost.
+func (s *Scopes) FindIdent(name string) *decls.VariableDecl {
+ if ident, found := s.scopes.idents[name]; found {
+ return ident
+ }
+ if s.parent != nil {
+ return s.parent.FindIdent(name)
+ }
+ return nil
+}
+
+// FindIdentInScope finds the first ident Decl with a matching name in the current Scopes value, or
+// nil if one does not exist.
+// Note: The search is only performed on the current scope and does not search outer scopes.
+func (s *Scopes) FindIdentInScope(name string) *decls.VariableDecl {
+ if ident, found := s.scopes.idents[name]; found {
+ return ident
+ }
+ return nil
+}
+
+// SetFunction adds the function Decl to the current scope.
+// Note: Any previous entry for a function in the current scope with the same name is overwritten.
+func (s *Scopes) SetFunction(fn *decls.FunctionDecl) {
+ s.scopes.functions[fn.Name()] = fn
+}
+
+// FindFunction finds the first function Decl with a matching name in Scopes.
+// The search is performed from innermost to outermost.
+// Returns nil if no such function exists in Scopes.
+func (s *Scopes) FindFunction(name string) *decls.FunctionDecl {
+ if fn, found := s.scopes.functions[name]; found {
+ return fn
+ }
+ if s.parent != nil {
+ return s.parent.FindFunction(name)
+ }
+ return nil
+}
+
+// Group is a set of Decls that is pushed on or popped off a Scopes as a unit.
+// Contains separate namespaces for identifier and function Decls.
+// (Should be named "Scope" perhaps?)
+type Group struct {
+ idents map[string]*decls.VariableDecl
+ functions map[string]*decls.FunctionDecl
+}
+
+// copy creates a new Group instance with a shallow copy of the variables and functions.
+// If callers need to mutate the declarations for a function, they should copy-on-write.
+func (g *Group) copy() *Group {
+ cpy := &Group{
+ idents: make(map[string]*decls.VariableDecl, len(g.idents)),
+ functions: make(map[string]*decls.FunctionDecl, len(g.functions)),
+ }
+ for n, id := range g.idents {
+ cpy.idents[n] = id
+ }
+ for n, fn := range g.functions {
+ cpy.functions[n] = fn
+ }
+ return cpy
+}
+
+// newGroup creates a new Group with empty maps for identifiers and functions.
+func newGroup() *Group {
+ return &Group{
+ idents: make(map[string]*decls.VariableDecl),
+ functions: make(map[string]*decls.FunctionDecl),
+ }
+}
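
Scopes gives the checker lexical shadowing through a parent chain. A package-internal sketch (newScopes is unexported) assuming decls.NewVariable, types.IntType, and types.StringType from cel-go's common packages:

```go
package checker

import (
	"fmt"

	"github.com/google/cel-go/common/decls"
	"github.com/google/cel-go/common/types"
)

// Sketch only: newScopes is unexported, so this belongs inside package checker.
func exampleScopes() {
	s := newScopes()
	s.AddIdent(decls.NewVariable("x", types.IntType))

	inner := s.Push()
	inner.AddIdent(decls.NewVariable("x", types.StringType))

	// The innermost binding shadows the outer one.
	fmt.Println(inner.FindIdent("x").Type().TypeName()) // string
	// Popping restores visibility of the outer binding.
	fmt.Println(inner.Pop().FindIdent("x").Type().TypeName()) // int
}
```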
diff --git a/vendor/github.com/google/cel-go/checker/types.go b/vendor/github.com/google/cel-go/checker/types.go
new file mode 100644
index 000000000..4c65b2737
--- /dev/null
+++ b/vendor/github.com/google/cel-go/checker/types.go
@@ -0,0 +1,314 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package checker
+
+import (
+ "github.com/google/cel-go/common/types"
+)
+
+// isDyn returns true if the input t is either type DYN or a well-known ANY message.
+func isDyn(t *types.Type) bool {
+ // Note: object type values that are well-known and map to a DYN value in practice
+ // are sanitized prior to being added to the environment.
+ switch t.Kind() {
+ case types.DynKind, types.AnyKind:
+ return true
+ default:
+ return false
+ }
+}
+
+// isDynOrError returns true if the input is either an Error, DYN, or well-known ANY message.
+func isDynOrError(t *types.Type) bool {
+ return isError(t) || isDyn(t)
+}
+
+func isError(t *types.Type) bool {
+ return t.Kind() == types.ErrorKind
+}
+
+func isOptional(t *types.Type) bool {
+ if t.Kind() == types.OpaqueKind {
+ return t.TypeName() == "optional_type"
+ }
+ return false
+}
+
+func maybeUnwrapOptional(t *types.Type) (*types.Type, bool) {
+ if isOptional(t) {
+ return t.Parameters()[0], true
+ }
+ return t, false
+}
+
+// isEqualOrLessSpecific checks whether one type is equal or less specific than the other one.
+// A type is less specific if it matches the other type using the DYN type.
+func isEqualOrLessSpecific(t1, t2 *types.Type) bool {
+ kind1, kind2 := t1.Kind(), t2.Kind()
+ // The first type is less specific.
+ if isDyn(t1) || kind1 == types.TypeParamKind {
+ return true
+ }
+ // The first type is not less specific.
+ if isDyn(t2) || kind2 == types.TypeParamKind {
+ return false
+ }
+ // Types must be of the same kind to be equal.
+ if kind1 != kind2 {
+ return false
+ }
+
+ // With limited exceptions for ANY and JSON values, the types must agree and be equivalent in
+ // order to return true.
+ switch kind1 {
+ case types.OpaqueKind:
+ if t1.TypeName() != t2.TypeName() ||
+ len(t1.Parameters()) != len(t2.Parameters()) {
+ return false
+ }
+ for i, p1 := range t1.Parameters() {
+ if !isEqualOrLessSpecific(p1, t2.Parameters()[i]) {
+ return false
+ }
+ }
+ return true
+ case types.ListKind:
+ return isEqualOrLessSpecific(t1.Parameters()[0], t2.Parameters()[0])
+ case types.MapKind:
+ return isEqualOrLessSpecific(t1.Parameters()[0], t2.Parameters()[0]) &&
+ isEqualOrLessSpecific(t1.Parameters()[1], t2.Parameters()[1])
+ case types.TypeKind:
+ return true
+ default:
+ return t1.IsExactType(t2)
+ }
+}
+
+// internalIsAssignable returns true if t1 is assignable to t2.
+func internalIsAssignable(m *mapping, t1, t2 *types.Type) bool {
+ // Process type parameters.
+ kind1, kind2 := t1.Kind(), t2.Kind()
+ if kind2 == types.TypeParamKind {
+ // If t2 is a valid type substitution for t1, return true.
+ valid, t2HasSub := isValidTypeSubstitution(m, t1, t2)
+ if valid {
+ return true
+ }
+ // If t2 is not a valid type sub for t1 and already has a known substitution, return false
+ // since it is not possible for t1 to be a substitution for t2.
+ if !valid && t2HasSub {
+ return false
+ }
+ // Otherwise, fall through to check whether t1 is a possible substitution for t2.
+ }
+ if kind1 == types.TypeParamKind {
+ // Return whether t1 is a valid substitution for t2. If not, do no additional checks as the
+ // possible type substitutions have been searched in both directions.
+ valid, _ := isValidTypeSubstitution(m, t2, t1)
+ return valid
+ }
+
+ // Next check for wildcard types.
+ if isDynOrError(t1) || isDynOrError(t2) {
+ return true
+ }
+ // Preserve the nullness checks of the legacy type-checker.
+ if kind1 == types.NullTypeKind {
+ return internalIsAssignableNull(t2)
+ }
+ if kind2 == types.NullTypeKind {
+ return internalIsAssignableNull(t1)
+ }
+
+ // Test for when the types do not need to agree, but are more specific than dyn.
+ switch kind1 {
+ case types.BoolKind, types.BytesKind, types.DoubleKind, types.IntKind, types.StringKind, types.UintKind,
+ types.AnyKind, types.DurationKind, types.TimestampKind,
+ types.StructKind:
+ // Test whether t2 is assignable from t1. The order of this check won't usually matter;
+ // however, there may be cases where type capabilities are expanded beyond what is supported
+ // in the current common/types package. For example, an interface designation for a group of
+ // Struct types.
+ return t2.IsAssignableType(t1)
+ case types.TypeKind:
+ return kind2 == types.TypeKind
+ case types.OpaqueKind, types.ListKind, types.MapKind:
+ return t1.Kind() == t2.Kind() && t1.TypeName() == t2.TypeName() &&
+ internalIsAssignableList(m, t1.Parameters(), t2.Parameters())
+ default:
+ return false
+ }
+}
+
+// isValidTypeSubstitution returns whether t2 (or its type substitution) is a valid type
+// substitution for t1, and whether t2 has a type substitution in mapping m.
+//
+// The type t2 is a valid substitution for t1 if any of the following statements is true
+// - t2 has a type substitution (t2sub) equal to t1
+// - t2 has a type substitution (t2sub) assignable to t1
+// - t2 does not occur within t1.
+func isValidTypeSubstitution(m *mapping, t1, t2 *types.Type) (valid, hasSub bool) {
+ // Early return if the t1 and t2 are the same instance.
+ kind1, kind2 := t1.Kind(), t2.Kind()
+ if kind1 == kind2 && t1.IsExactType(t2) {
+ return true, true
+ }
+ if t2Sub, found := m.find(t2); found {
+ // Early return if t1 and t2Sub are the same instance, as otherwise the mapping
+ // might mark a type as being a substitution for itself.
+ if kind1 == t2Sub.Kind() && t1.IsExactType(t2Sub) {
+ return true, true
+ }
+ // If the types are compatible, pick the more general type and return true
+ if internalIsAssignable(m, t1, t2Sub) {
+ t2New := mostGeneral(t1, t2Sub)
+ // only update the type reference map if the target type does not occur within it.
+ if notReferencedIn(m, t2, t2New) {
+ m.add(t2, t2New)
+ }
+ // acknowledge the type agreement, and that the substitution is already tracked.
+ return true, true
+ }
+ return false, true
+ }
+ if notReferencedIn(m, t2, t1) {
+ m.add(t2, t1)
+ return true, false
+ }
+ return false, false
+}
+
+// internalIsAssignableList returns true if the element types at each index in the list are
+// assignable from l1[i] to l2[i]. The list lengths must also agree for the lists to be
+// assignable.
+func internalIsAssignableList(m *mapping, l1, l2 []*types.Type) bool {
+ if len(l1) != len(l2) {
+ return false
+ }
+ for i, t1 := range l1 {
+ if !internalIsAssignable(m, t1, l2[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// internalIsAssignableNull returns true if the type is nullable.
+func internalIsAssignableNull(t *types.Type) bool {
+ return isLegacyNullable(t) || t.IsAssignableType(types.NullType)
+}
+
+// isLegacyNullable preserves the null-ness compatibility of the original type-checker implementation.
+func isLegacyNullable(t *types.Type) bool {
+ switch t.Kind() {
+ case types.OpaqueKind, types.StructKind, types.AnyKind, types.DurationKind, types.TimestampKind:
+ return true
+ }
+ return false
+}
+
+// isAssignable returns an updated type substitution mapping if t1 is assignable to t2.
+func isAssignable(m *mapping, t1, t2 *types.Type) *mapping {
+ mCopy := m.copy()
+ if internalIsAssignable(mCopy, t1, t2) {
+ return mCopy
+ }
+ return nil
+}
+
+// isAssignableList returns an updated type substitution mapping if l1 is assignable to l2.
+func isAssignableList(m *mapping, l1, l2 []*types.Type) *mapping {
+ mCopy := m.copy()
+ if internalIsAssignableList(mCopy, l1, l2) {
+ return mCopy
+ }
+ return nil
+}
+
+// mostGeneral returns the more general of two types which are known to unify.
+func mostGeneral(t1, t2 *types.Type) *types.Type {
+ if isEqualOrLessSpecific(t1, t2) {
+ return t1
+ }
+ return t2
+}
+
+// notReferencedIn checks whether the type doesn't appear directly or transitively within the other
+// type. This is a standard requirement for type unification, commonly referred to as the "occurs
+// check".
+func notReferencedIn(m *mapping, t, withinType *types.Type) bool {
+ if t.IsExactType(withinType) {
+ return false
+ }
+ withinKind := withinType.Kind()
+ switch withinKind {
+ case types.TypeParamKind:
+ wtSub, found := m.find(withinType)
+ if !found {
+ return true
+ }
+ return notReferencedIn(m, t, wtSub)
+ case types.OpaqueKind, types.ListKind, types.MapKind, types.TypeKind:
+ for _, pt := range withinType.Parameters() {
+ if !notReferencedIn(m, t, pt) {
+ return false
+ }
+ }
+ return true
+ default:
+ return true
+ }
+}
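+
+// Illustrative sketch, not part of upstream cel-go: the occurs check rejects
+// self-referential bindings such as T -> list(T). Assuming an empty mapping m
+// built by this package's newMapping helper:
+//
+//	tParam := types.NewTypeParamType("T")
+//	listOfT := types.NewListType(tParam)
+//	notReferencedIn(m, tParam, listOfT)       // false: T occurs in list(T)
+//	notReferencedIn(m, tParam, types.IntType) // true: binding T = int is safe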
+
+// substitute replaces all direct and indirect occurrences of bound type parameters. Unbound type
+// parameters are replaced by DYN if typeParamToDyn is true.
+func substitute(m *mapping, t *types.Type, typeParamToDyn bool) *types.Type {
+ if tSub, found := m.find(t); found {
+ return substitute(m, tSub, typeParamToDyn)
+ }
+ kind := t.Kind()
+ if typeParamToDyn && kind == types.TypeParamKind {
+ return types.DynType
+ }
+ switch kind {
+ case types.OpaqueKind:
+ return types.NewOpaqueType(t.TypeName(), substituteParams(m, t.Parameters(), typeParamToDyn)...)
+ case types.ListKind:
+ return types.NewListType(substitute(m, t.Parameters()[0], typeParamToDyn))
+ case types.MapKind:
+ return types.NewMapType(substitute(m, t.Parameters()[0], typeParamToDyn),
+ substitute(m, t.Parameters()[1], typeParamToDyn))
+ case types.TypeKind:
+ if len(t.Parameters()) > 0 {
+ tParam := t.Parameters()[0]
+ return types.NewTypeTypeWithParam(substitute(m, tParam, typeParamToDyn))
+ }
+ return t
+ default:
+ return t
+ }
+}
+
+func substituteParams(m *mapping, typeParams []*types.Type, typeParamToDyn bool) []*types.Type {
+ subParams := make([]*types.Type, len(typeParams))
+ for i, tp := range typeParams {
+ subParams[i] = substitute(m, tp, typeParamToDyn)
+ }
+ return subParams
+}
+
+func newFunctionType(resultType *types.Type, argTypes ...*types.Type) *types.Type {
+ return types.NewOpaqueType("function", append([]*types.Type{resultType}, argTypes...)...)
+}
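+
+// Illustrative sketch, not part of upstream cel-go: once unification binds a
+// type parameter, substitute rewrites every occurrence of it. Assuming a
+// mapping m (via this package's newMapping helper) that maps T -> int:
+//
+//	listOfT := types.NewListType(types.NewTypeParamType("T"))
+//	substitute(m, listOfT, false) // list(int)
+//	// With typeParamToDyn=true, unbound parameters collapse to DYN instead:
+//	substitute(newMapping(), listOfT, true) // list(dyn)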
diff --git a/vendor/github.com/google/cel-go/common/BUILD.bazel b/vendor/github.com/google/cel-go/common/BUILD.bazel
new file mode 100644
index 000000000..eef7f281b
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/BUILD.bazel
@@ -0,0 +1,34 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(
+ default_visibility = ["//visibility:public"],
+ licenses = ["notice"], # Apache 2.0
+)
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "cost.go",
+ "error.go",
+ "errors.go",
+ "location.go",
+ "source.go",
+ ],
+ importpath = "github.com/google/cel-go/common",
+ deps = [
+ "//common/runes:go_default_library",
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
+ ],
+)
+
+go_test(
+ name = "go_default_test",
+ size = "small",
+ srcs = [
+ "errors_test.go",
+ "source_test.go",
+ ],
+ embed = [
+ ":go_default_library",
+ ],
+)
diff --git a/vendor/github.com/google/cel-go/common/ast/BUILD.bazel b/vendor/github.com/google/cel-go/common/ast/BUILD.bazel
new file mode 100644
index 000000000..9824f57a9
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/ast/BUILD.bazel
@@ -0,0 +1,57 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(
+ default_visibility = ["//visibility:public"],
+ licenses = ["notice"], # Apache 2.0
+)
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "ast.go",
+ "conversion.go",
+ "expr.go",
+ "factory.go",
+ "navigable.go",
+ ],
+ importpath = "github.com/google/cel-go/common/ast",
+ deps = [
+ "//common:go_default_library",
+ "//common/types:go_default_library",
+ "//common/types/ref:go_default_library",
+ "@dev_cel_expr//:expr",
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
+ "@org_golang_google_protobuf//proto:go_default_library",
+ "@org_golang_google_protobuf//types/known/structpb:go_default_library",
+ ],
+)
+
+go_test(
+ name = "go_default_test",
+ srcs = [
+ "ast_test.go",
+ "conversion_test.go",
+ "expr_test.go",
+ "navigable_test.go",
+ ],
+ embed = [
+ ":go_default_library",
+ ],
+ deps = [
+ "//checker:go_default_library",
+ "//checker/decls:go_default_library",
+ "//common:go_default_library",
+ "//common/containers:go_default_library",
+ "//common/decls:go_default_library",
+ "//common/operators:go_default_library",
+ "//common/overloads:go_default_library",
+ "//common/stdlib:go_default_library",
+ "//common/types:go_default_library",
+ "//common/types/ref:go_default_library",
+ "//parser:go_default_library",
+ "//test/proto3pb:go_default_library",
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
+ "@org_golang_google_protobuf//proto:go_default_library",
+ "@org_golang_google_protobuf//encoding/prototext:go_default_library",
+ ],
+)
diff --git a/vendor/github.com/google/cel-go/common/ast/ast.go b/vendor/github.com/google/cel-go/common/ast/ast.go
new file mode 100644
index 000000000..b807669d4
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/ast/ast.go
@@ -0,0 +1,457 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package ast declares data structures useful for parsed and checked abstract syntax trees.
+package ast
+
+import (
+ "github.com/google/cel-go/common"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+)
+
+// AST contains a protobuf expression and source info along with CEL-native type and reference information.
+type AST struct {
+ expr Expr
+ sourceInfo *SourceInfo
+ typeMap map[int64]*types.Type
+ refMap map[int64]*ReferenceInfo
+}
+
+// Expr returns the root ast.Expr value in the AST.
+func (a *AST) Expr() Expr {
+ if a == nil {
+ return nilExpr
+ }
+ return a.expr
+}
+
+// SourceInfo returns the source metadata associated with the parse / type-check passes.
+func (a *AST) SourceInfo() *SourceInfo {
+ if a == nil {
+ return nil
+ }
+ return a.sourceInfo
+}
+
+// GetType returns the type for the expression at the given id, if one exists, else types.DynType.
+func (a *AST) GetType(id int64) *types.Type {
+ if t, found := a.TypeMap()[id]; found {
+ return t
+ }
+ return types.DynType
+}
+
+// SetType sets the type of the expression node at the given id.
+func (a *AST) SetType(id int64, t *types.Type) {
+ if a == nil {
+ return
+ }
+ a.typeMap[id] = t
+}
+
+// TypeMap returns the map of expression ids to type-checked types.
+//
+// If the AST is not type-checked, the map will be empty.
+func (a *AST) TypeMap() map[int64]*types.Type {
+ if a == nil {
+ return map[int64]*types.Type{}
+ }
+ return a.typeMap
+}
+
+// GetOverloadIDs returns the set of overload function names for a given expression id.
+//
+// If the expression id is not a function call, or the AST is not type-checked, the result will be empty.
+func (a *AST) GetOverloadIDs(id int64) []string {
+ if ref, found := a.ReferenceMap()[id]; found {
+ return ref.OverloadIDs
+ }
+ return []string{}
+}
+
+// ReferenceMap returns the map of expression id to identifier, constant, and function references.
+func (a *AST) ReferenceMap() map[int64]*ReferenceInfo {
+ if a == nil {
+ return map[int64]*ReferenceInfo{}
+ }
+ return a.refMap
+}
+
+// SetReference adds a reference to the checked AST type map.
+func (a *AST) SetReference(id int64, r *ReferenceInfo) {
+ if a == nil {
+ return
+ }
+ a.refMap[id] = r
+}
+
+// IsChecked returns whether the AST is type-checked.
+func (a *AST) IsChecked() bool {
+ return a != nil && len(a.TypeMap()) > 0
+}
+
+// NewAST creates a base AST instance with an ast.Expr and ast.SourceInfo value.
+func NewAST(e Expr, sourceInfo *SourceInfo) *AST {
+ if e == nil {
+ e = nilExpr
+ }
+ return &AST{
+ expr: e,
+ sourceInfo: sourceInfo,
+ typeMap: make(map[int64]*types.Type),
+ refMap: make(map[int64]*ReferenceInfo),
+ }
+}
+
+// NewCheckedAST wraps a parsed AST and augments it with type and reference metadata.
+func NewCheckedAST(parsed *AST, typeMap map[int64]*types.Type, refMap map[int64]*ReferenceInfo) *AST {
+ return &AST{
+ expr: parsed.Expr(),
+ sourceInfo: parsed.SourceInfo(),
+ typeMap: typeMap,
+ refMap: refMap,
+ }
+}
+
+// Copy creates a deep copy of the Expr and SourceInfo values in the input AST.
+//
+// Copies of the Expr value are generated using an internal default ExprFactory.
+func Copy(a *AST) *AST {
+ if a == nil {
+ return nil
+ }
+ e := defaultFactory.CopyExpr(a.expr)
+ if !a.IsChecked() {
+ return NewAST(e, CopySourceInfo(a.SourceInfo()))
+ }
+ typesCopy := make(map[int64]*types.Type, len(a.typeMap))
+ for id, t := range a.typeMap {
+ typesCopy[id] = t
+ }
+ refsCopy := make(map[int64]*ReferenceInfo, len(a.refMap))
+ for id, r := range a.refMap {
+ refsCopy[id] = r
+ }
+ return NewCheckedAST(NewAST(e, CopySourceInfo(a.SourceInfo())), typesCopy, refsCopy)
+}
+
+// MaxID returns the upper-bound, non-inclusive, of ids present within the AST's Expr value.
+func MaxID(a *AST) int64 {
+ visitor := &maxIDVisitor{maxID: 1}
+ PostOrderVisit(a.Expr(), visitor)
+ for id, call := range a.SourceInfo().MacroCalls() {
+ PostOrderVisit(call, visitor)
+ if id > visitor.maxID {
+ visitor.maxID = id + 1
+ }
+ }
+ return visitor.maxID + 1
+}
+
+// NewSourceInfo creates a simple SourceInfo object from an input common.Source value.
+func NewSourceInfo(src common.Source) *SourceInfo {
+ var lineOffsets []int32
+ var desc string
+ baseLine := int32(0)
+ baseCol := int32(0)
+ if src != nil {
+ desc = src.Description()
+ lineOffsets = src.LineOffsets()
+ // Determine whether the source metadata should be computed relative
+ // to a base line and column value. This can be determined by requesting
+ // the location for offset 0 from the source object.
+ if loc, found := src.OffsetLocation(0); found {
+ baseLine = int32(loc.Line()) - 1
+ baseCol = int32(loc.Column())
+ }
+ }
+ return &SourceInfo{
+ desc: desc,
+ lines: lineOffsets,
+ baseLine: baseLine,
+ baseCol: baseCol,
+ offsetRanges: make(map[int64]OffsetRange),
+ macroCalls: make(map[int64]Expr),
+ }
+}
+
+// CopySourceInfo creates a deep copy of the MacroCalls within the input SourceInfo.
+//
+// Copies of macro Expr values are generated using an internal default ExprFactory.
+func CopySourceInfo(info *SourceInfo) *SourceInfo {
+ if info == nil {
+ return nil
+ }
+ rangesCopy := make(map[int64]OffsetRange, len(info.offsetRanges))
+ for id, off := range info.offsetRanges {
+ rangesCopy[id] = off
+ }
+ callsCopy := make(map[int64]Expr, len(info.macroCalls))
+ for id, call := range info.macroCalls {
+ callsCopy[id] = defaultFactory.CopyExpr(call)
+ }
+ return &SourceInfo{
+ syntax: info.syntax,
+ desc: info.desc,
+ lines: info.lines,
+ baseLine: info.baseLine,
+ baseCol: info.baseCol,
+ offsetRanges: rangesCopy,
+ macroCalls: callsCopy,
+ }
+}
+
+// SourceInfo records basic information about the expression as a textual input and
+// as a parsed expression value.
+type SourceInfo struct {
+ syntax string
+ desc string
+ lines []int32
+ baseLine int32
+ baseCol int32
+ offsetRanges map[int64]OffsetRange
+ macroCalls map[int64]Expr
+}
+
+// SyntaxVersion returns the syntax version associated with the text expression.
+func (s *SourceInfo) SyntaxVersion() string {
+ if s == nil {
+ return ""
+ }
+ return s.syntax
+}
+
+// Description provides information about where the expression came from.
+func (s *SourceInfo) Description() string {
+ if s == nil {
+ return ""
+ }
+ return s.desc
+}
+
+// LineOffsets returns a list of the 0-based character offsets in the input text where newlines appear.
+func (s *SourceInfo) LineOffsets() []int32 {
+ if s == nil {
+ return []int32{}
+ }
+ return s.lines
+}
+
+// MacroCalls returns a map of expression id to ast.Expr value where the id represents the expression
+// node where the macro was inserted into the AST, and the ast.Expr value represents the original call
+// signature which was replaced.
+func (s *SourceInfo) MacroCalls() map[int64]Expr {
+ if s == nil {
+ return map[int64]Expr{}
+ }
+ return s.macroCalls
+}
+
+// GetMacroCall returns the original ast.Expr value for the given expression if it was generated via
+// a macro replacement.
+//
+// Note, parsing options must be enabled to track macro calls before this method will return a value.
+func (s *SourceInfo) GetMacroCall(id int64) (Expr, bool) {
+ e, found := s.MacroCalls()[id]
+ return e, found
+}
+
+// SetMacroCall records a macro call at a specific location.
+func (s *SourceInfo) SetMacroCall(id int64, e Expr) {
+ if s != nil {
+ s.macroCalls[id] = e
+ }
+}
+
+// ClearMacroCall removes the macro call at the given expression id.
+func (s *SourceInfo) ClearMacroCall(id int64) {
+ if s != nil {
+ delete(s.macroCalls, id)
+ }
+}
+
+// OffsetRanges returns a map of expression id to OffsetRange values where the range indicates either:
+// the start and end position in the input stream where the expression occurs, or the start position
+// only. If the range only captures start position, the stop position of the range will be equal to
+// the start.
+func (s *SourceInfo) OffsetRanges() map[int64]OffsetRange {
+ if s == nil {
+ return map[int64]OffsetRange{}
+ }
+ return s.offsetRanges
+}
+
+// GetOffsetRange retrieves an OffsetRange for the given expression id if one exists.
+func (s *SourceInfo) GetOffsetRange(id int64) (OffsetRange, bool) {
+ if s == nil {
+ return OffsetRange{}, false
+ }
+ o, found := s.offsetRanges[id]
+ return o, found
+}
+
+// SetOffsetRange sets the OffsetRange for the given expression id.
+func (s *SourceInfo) SetOffsetRange(id int64, o OffsetRange) {
+ if s == nil {
+ return
+ }
+ s.offsetRanges[id] = o
+}
+
+// ClearOffsetRange removes the OffsetRange for the given expression id.
+func (s *SourceInfo) ClearOffsetRange(id int64) {
+ if s != nil {
+ delete(s.offsetRanges, id)
+ }
+}
+
+// GetStartLocation calculates the human-readable 1-based line and 0-based column of the first character
+// of the expression node at the id.
+func (s *SourceInfo) GetStartLocation(id int64) common.Location {
+ if o, found := s.GetOffsetRange(id); found {
+ return s.GetLocationByOffset(o.Start)
+ }
+ return common.NoLocation
+}
+
+// GetStopLocation calculates the human-readable 1-based line and 0-based column of the last character for
+// the expression node at the given id.
+//
+// If the SourceInfo was generated from a serialized protobuf representation, the stop location will
+// be identical to the start location for the expression.
+func (s *SourceInfo) GetStopLocation(id int64) common.Location {
+ if o, found := s.GetOffsetRange(id); found {
+ return s.GetLocationByOffset(o.Stop)
+ }
+ return common.NoLocation
+}
+
+// GetLocationByOffset returns the line and column information for a given character offset.
+func (s *SourceInfo) GetLocationByOffset(offset int32) common.Location {
+ line := 1
+ col := int(offset)
+ for _, lineOffset := range s.LineOffsets() {
+ if lineOffset > offset {
+ break
+ }
+ line++
+ col = int(offset - lineOffset)
+ }
+ return common.NewLocation(line, col)
+}
+
+// ComputeOffset calculates the 0-based character offset from a 1-based line and 0-based column.
+func (s *SourceInfo) ComputeOffset(line, col int32) int32 {
+ if s != nil {
+ line = s.baseLine + line
+ col = s.baseCol + col
+ }
+ if line == 1 {
+ return col
+ }
+ if line < 1 || line > int32(len(s.LineOffsets())) {
+ return -1
+ }
+ offset := s.LineOffsets()[line-2]
+ return offset + col
+}
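+
+// Illustrative sketch, not part of upstream cel-go: for the two-line source
+// "ab\ncd" the recorded line offsets place line 2 at character offset 3, so
+// offset 4 (the 'd') and the (line 2, column 1) location convert both ways:
+//
+//	info := NewSourceInfo(common.NewTextSource("ab\ncd"))
+//	info.GetLocationByOffset(4) // line 2, column 1
+//	info.ComputeOffset(2, 1)    // 4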
+
+// OffsetRange captures the start and stop positions of a section of text in the input expression.
+type OffsetRange struct {
+ Start int32
+ Stop int32
+}
+
+// ReferenceInfo contains a CEL native representation of an identifier reference which may refer to
+// either a qualified identifier name, a set of overload ids, or a constant value from an enum.
+type ReferenceInfo struct {
+ Name string
+ OverloadIDs []string
+ Value ref.Val
+}
+
+// NewIdentReference creates a ReferenceInfo instance for an identifier with an optional constant value.
+func NewIdentReference(name string, value ref.Val) *ReferenceInfo {
+ return &ReferenceInfo{Name: name, Value: value}
+}
+
+// NewFunctionReference creates a ReferenceInfo instance for a set of function overloads.
+func NewFunctionReference(overloads ...string) *ReferenceInfo {
+ info := &ReferenceInfo{}
+ for _, id := range overloads {
+ info.AddOverload(id)
+ }
+ return info
+}
+
+// AddOverload appends a function overload ID to the ReferenceInfo.
+func (r *ReferenceInfo) AddOverload(overloadID string) {
+ for _, id := range r.OverloadIDs {
+ if id == overloadID {
+ return
+ }
+ }
+ r.OverloadIDs = append(r.OverloadIDs, overloadID)
+}
+
+// Equals returns whether two references are identical to each other.
+func (r *ReferenceInfo) Equals(other *ReferenceInfo) bool {
+ if r.Name != other.Name {
+ return false
+ }
+ if len(r.OverloadIDs) != len(other.OverloadIDs) {
+ return false
+ }
+ if len(r.OverloadIDs) != 0 {
+ overloadMap := make(map[string]struct{}, len(r.OverloadIDs))
+ for _, id := range r.OverloadIDs {
+ overloadMap[id] = struct{}{}
+ }
+ for _, id := range other.OverloadIDs {
+ _, found := overloadMap[id]
+ if !found {
+ return false
+ }
+ }
+ }
+ if r.Value == nil && other.Value == nil {
+ return true
+ }
+ if r.Value == nil && other.Value != nil ||
+ r.Value != nil && other.Value == nil ||
+ r.Value.Equal(other.Value) != types.True {
+ return false
+ }
+ return true
+}
+
+type maxIDVisitor struct {
+ maxID int64
+ *baseVisitor
+}
+
+// VisitExpr updates the max identifier if the incoming expression id is greater than previously observed.
+func (v *maxIDVisitor) VisitExpr(e Expr) {
+ if v.maxID < e.ID() {
+ v.maxID = e.ID()
+ }
+}
+
+// VisitEntryExpr updates the max identifier if the incoming entry id is greater than previously observed.
+func (v *maxIDVisitor) VisitEntryExpr(e EntryExpr) {
+ if v.maxID < e.ID() {
+ v.maxID = e.ID()
+ }
+}
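+
+// Illustrative sketch, not part of upstream cel-go: MaxID is the natural
+// starting point when minting fresh ids during an AST rewrite:
+//
+//	nextID := MaxID(a)
+//	gen := func(originalID int64) int64 {
+//		id := nextID
+//		nextID++
+//		return id
+//	}
+//	a.Expr().RenumberIDs(gen)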
diff --git a/vendor/github.com/google/cel-go/common/ast/conversion.go b/vendor/github.com/google/cel-go/common/ast/conversion.go
new file mode 100644
index 000000000..435d8f654
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/ast/conversion.go
@@ -0,0 +1,659 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ast
+
+import (
+ "fmt"
+
+ "google.golang.org/protobuf/proto"
+
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+
+ celpb "cel.dev/expr"
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+)
+
+// ToProto converts an AST to a CheckedExpr protobuf.
+func ToProto(ast *AST) (*exprpb.CheckedExpr, error) {
+ refMap := make(map[int64]*exprpb.Reference, len(ast.ReferenceMap()))
+ for id, ref := range ast.ReferenceMap() {
+ r, err := ReferenceInfoToProto(ref)
+ if err != nil {
+ return nil, err
+ }
+ refMap[id] = r
+ }
+ typeMap := make(map[int64]*exprpb.Type, len(ast.TypeMap()))
+ for id, typ := range ast.TypeMap() {
+ t, err := types.TypeToExprType(typ)
+ if err != nil {
+ return nil, err
+ }
+ typeMap[id] = t
+ }
+ e, err := ExprToProto(ast.Expr())
+ if err != nil {
+ return nil, err
+ }
+ info, err := SourceInfoToProto(ast.SourceInfo())
+ if err != nil {
+ return nil, err
+ }
+ return &exprpb.CheckedExpr{
+ Expr: e,
+ SourceInfo: info,
+ ReferenceMap: refMap,
+ TypeMap: typeMap,
+ }, nil
+}
+
+// ToAST converts a CheckedExpr protobuf to an AST instance.
+func ToAST(checked *exprpb.CheckedExpr) (*AST, error) {
+ refMap := make(map[int64]*ReferenceInfo, len(checked.GetReferenceMap()))
+ for id, ref := range checked.GetReferenceMap() {
+ r, err := ProtoToReferenceInfo(ref)
+ if err != nil {
+ return nil, err
+ }
+ refMap[id] = r
+ }
+ typeMap := make(map[int64]*types.Type, len(checked.GetTypeMap()))
+ for id, typ := range checked.GetTypeMap() {
+ t, err := types.ExprTypeToType(typ)
+ if err != nil {
+ return nil, err
+ }
+ typeMap[id] = t
+ }
+ info, err := ProtoToSourceInfo(checked.GetSourceInfo())
+ if err != nil {
+ return nil, err
+ }
+ root, err := ProtoToExpr(checked.GetExpr())
+ if err != nil {
+ return nil, err
+ }
+ ast := NewCheckedAST(NewAST(root, info), typeMap, refMap)
+ return ast, nil
+}
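+
+// Illustrative sketch, not part of upstream cel-go: ToProto and ToAST
+// round-trip a checked AST through its wire representation:
+//
+//	pb, err := ToProto(checked) // *exprpb.CheckedExpr
+//	if err == nil {
+//		back, _ := ToAST(pb) // *AST with equivalent type and reference maps
+//		_ = back
+//	}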
+
+// ProtoToExpr converts a protobuf Expr value to an ast.Expr value.
+func ProtoToExpr(e *exprpb.Expr) (Expr, error) {
+ factory := NewExprFactory()
+ return exprInternal(factory, e)
+}
+
+// ProtoToEntryExpr converts a protobuf struct/map entry to an ast.EntryExpr
+func ProtoToEntryExpr(e *exprpb.Expr_CreateStruct_Entry) (EntryExpr, error) {
+ factory := NewExprFactory()
+ switch e.GetKeyKind().(type) {
+ case *exprpb.Expr_CreateStruct_Entry_FieldKey:
+ return exprStructField(factory, e.GetId(), e)
+ case *exprpb.Expr_CreateStruct_Entry_MapKey:
+ return exprMapEntry(factory, e.GetId(), e)
+ }
+ return nil, fmt.Errorf("unsupported expr entry kind: %v", e)
+}
+
+func exprInternal(factory ExprFactory, e *exprpb.Expr) (Expr, error) {
+ id := e.GetId()
+ switch e.GetExprKind().(type) {
+ case *exprpb.Expr_CallExpr:
+ return exprCall(factory, id, e.GetCallExpr())
+ case *exprpb.Expr_ComprehensionExpr:
+ return exprComprehension(factory, id, e.GetComprehensionExpr())
+ case *exprpb.Expr_ConstExpr:
+ return exprLiteral(factory, id, e.GetConstExpr())
+ case *exprpb.Expr_IdentExpr:
+ return exprIdent(factory, id, e.GetIdentExpr())
+ case *exprpb.Expr_ListExpr:
+ return exprList(factory, id, e.GetListExpr())
+ case *exprpb.Expr_SelectExpr:
+ return exprSelect(factory, id, e.GetSelectExpr())
+ case *exprpb.Expr_StructExpr:
+ s := e.GetStructExpr()
+ if s.GetMessageName() != "" {
+ return exprStruct(factory, id, s)
+ }
+ return exprMap(factory, id, s)
+ }
+ return factory.NewUnspecifiedExpr(id), nil
+}
+
+func exprCall(factory ExprFactory, id int64, call *exprpb.Expr_Call) (Expr, error) {
+ var err error
+ args := make([]Expr, len(call.GetArgs()))
+ for i, a := range call.GetArgs() {
+ args[i], err = exprInternal(factory, a)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if call.GetTarget() == nil {
+ return factory.NewCall(id, call.GetFunction(), args...), nil
+ }
+
+ target, err := exprInternal(factory, call.GetTarget())
+ if err != nil {
+ return nil, err
+ }
+ return factory.NewMemberCall(id, call.GetFunction(), target, args...), nil
+}
+
+func exprComprehension(factory ExprFactory, id int64, comp *exprpb.Expr_Comprehension) (Expr, error) {
+ iterRange, err := exprInternal(factory, comp.GetIterRange())
+ if err != nil {
+ return nil, err
+ }
+ accuInit, err := exprInternal(factory, comp.GetAccuInit())
+ if err != nil {
+ return nil, err
+ }
+ loopCond, err := exprInternal(factory, comp.GetLoopCondition())
+ if err != nil {
+ return nil, err
+ }
+ loopStep, err := exprInternal(factory, comp.GetLoopStep())
+ if err != nil {
+ return nil, err
+ }
+ result, err := exprInternal(factory, comp.GetResult())
+ if err != nil {
+ return nil, err
+ }
+ return factory.NewComprehensionTwoVar(id,
+ iterRange,
+ comp.GetIterVar(),
+ comp.GetIterVar2(),
+ comp.GetAccuVar(),
+ accuInit,
+ loopCond,
+ loopStep,
+ result), nil
+}
+
+func exprLiteral(factory ExprFactory, id int64, c *exprpb.Constant) (Expr, error) {
+ val, err := ConstantToVal(c)
+ if err != nil {
+ return nil, err
+ }
+ return factory.NewLiteral(id, val), nil
+}
+
+func exprIdent(factory ExprFactory, id int64, i *exprpb.Expr_Ident) (Expr, error) {
+ return factory.NewIdent(id, i.GetName()), nil
+}
+
+func exprList(factory ExprFactory, id int64, l *exprpb.Expr_CreateList) (Expr, error) {
+ elems := make([]Expr, len(l.GetElements()))
+ for i, e := range l.GetElements() {
+ elem, err := exprInternal(factory, e)
+ if err != nil {
+ return nil, err
+ }
+ elems[i] = elem
+ }
+ return factory.NewList(id, elems, l.GetOptionalIndices()), nil
+}
+
+func exprMap(factory ExprFactory, id int64, s *exprpb.Expr_CreateStruct) (Expr, error) {
+ entries := make([]EntryExpr, len(s.GetEntries()))
+ var err error
+ for i, entry := range s.GetEntries() {
+ entries[i], err = exprMapEntry(factory, entry.GetId(), entry)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return factory.NewMap(id, entries), nil
+}
+
+func exprMapEntry(factory ExprFactory, id int64, e *exprpb.Expr_CreateStruct_Entry) (EntryExpr, error) {
+ k, err := exprInternal(factory, e.GetMapKey())
+ if err != nil {
+ return nil, err
+ }
+ v, err := exprInternal(factory, e.GetValue())
+ if err != nil {
+ return nil, err
+ }
+ return factory.NewMapEntry(id, k, v, e.GetOptionalEntry()), nil
+}
+
+func exprSelect(factory ExprFactory, id int64, s *exprpb.Expr_Select) (Expr, error) {
+ op, err := exprInternal(factory, s.GetOperand())
+ if err != nil {
+ return nil, err
+ }
+ if s.GetTestOnly() {
+ return factory.NewPresenceTest(id, op, s.GetField()), nil
+ }
+ return factory.NewSelect(id, op, s.GetField()), nil
+}
+
+func exprStruct(factory ExprFactory, id int64, s *exprpb.Expr_CreateStruct) (Expr, error) {
+ fields := make([]EntryExpr, len(s.GetEntries()))
+ var err error
+ for i, field := range s.GetEntries() {
+ fields[i], err = exprStructField(factory, field.GetId(), field)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return factory.NewStruct(id, s.GetMessageName(), fields), nil
+}
+
+func exprStructField(factory ExprFactory, id int64, f *exprpb.Expr_CreateStruct_Entry) (EntryExpr, error) {
+ v, err := exprInternal(factory, f.GetValue())
+ if err != nil {
+ return nil, err
+ }
+ return factory.NewStructField(id, f.GetFieldKey(), v, f.GetOptionalEntry()), nil
+}
+
+// ExprToProto serializes an ast.Expr value to a protobuf Expr representation.
+func ExprToProto(e Expr) (*exprpb.Expr, error) {
+ if e == nil {
+ return &exprpb.Expr{}, nil
+ }
+ switch e.Kind() {
+ case CallKind:
+ return protoCall(e.ID(), e.AsCall())
+ case ComprehensionKind:
+ return protoComprehension(e.ID(), e.AsComprehension())
+ case IdentKind:
+ return protoIdent(e.ID(), e.AsIdent())
+ case ListKind:
+ return protoList(e.ID(), e.AsList())
+ case LiteralKind:
+ return protoLiteral(e.ID(), e.AsLiteral())
+ case MapKind:
+ return protoMap(e.ID(), e.AsMap())
+ case SelectKind:
+ return protoSelect(e.ID(), e.AsSelect())
+ case StructKind:
+ return protoStruct(e.ID(), e.AsStruct())
+ case UnspecifiedExprKind:
+ // Handle the case where a macro reference may be getting translated.
+ // A nested macro 'pointer' is a non-zero expression id with no kind set.
+ if e.ID() != 0 {
+ return &exprpb.Expr{Id: e.ID()}, nil
+ }
+ return &exprpb.Expr{}, nil
+ }
+ return nil, fmt.Errorf("unsupported expr kind: %v", e)
+}
+
+// EntryExprToProto converts an ast.EntryExpr to a protobuf CreateStruct entry
+func EntryExprToProto(e EntryExpr) (*exprpb.Expr_CreateStruct_Entry, error) {
+ switch e.Kind() {
+ case MapEntryKind:
+ return protoMapEntry(e.ID(), e.AsMapEntry())
+ case StructFieldKind:
+ return protoStructField(e.ID(), e.AsStructField())
+ case UnspecifiedEntryExprKind:
+ return &exprpb.Expr_CreateStruct_Entry{}, nil
+ }
+ return nil, fmt.Errorf("unsupported expr entry kind: %v", e)
+}
+
+func protoCall(id int64, call CallExpr) (*exprpb.Expr, error) {
+ var err error
+ var target *exprpb.Expr
+ if call.IsMemberFunction() {
+ target, err = ExprToProto(call.Target())
+ if err != nil {
+ return nil, err
+ }
+ }
+ callArgs := call.Args()
+ args := make([]*exprpb.Expr, len(callArgs))
+ for i, a := range callArgs {
+ args[i], err = ExprToProto(a)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return &exprpb.Expr{
+ Id: id,
+ ExprKind: &exprpb.Expr_CallExpr{
+ CallExpr: &exprpb.Expr_Call{
+ Function: call.FunctionName(),
+ Target: target,
+ Args: args,
+ },
+ },
+ }, nil
+}
+
+func protoComprehension(id int64, comp ComprehensionExpr) (*exprpb.Expr, error) {
+ iterRange, err := ExprToProto(comp.IterRange())
+ if err != nil {
+ return nil, err
+ }
+ accuInit, err := ExprToProto(comp.AccuInit())
+ if err != nil {
+ return nil, err
+ }
+ loopCond, err := ExprToProto(comp.LoopCondition())
+ if err != nil {
+ return nil, err
+ }
+ loopStep, err := ExprToProto(comp.LoopStep())
+ if err != nil {
+ return nil, err
+ }
+ result, err := ExprToProto(comp.Result())
+ if err != nil {
+ return nil, err
+ }
+ return &exprpb.Expr{
+ Id: id,
+ ExprKind: &exprpb.Expr_ComprehensionExpr{
+ ComprehensionExpr: &exprpb.Expr_Comprehension{
+ IterVar: comp.IterVar(),
+ IterVar2: comp.IterVar2(),
+ IterRange: iterRange,
+ AccuVar: comp.AccuVar(),
+ AccuInit: accuInit,
+ LoopCondition: loopCond,
+ LoopStep: loopStep,
+ Result: result,
+ },
+ },
+ }, nil
+}
+
+func protoIdent(id int64, name string) (*exprpb.Expr, error) {
+ return &exprpb.Expr{
+ Id: id,
+ ExprKind: &exprpb.Expr_IdentExpr{
+ IdentExpr: &exprpb.Expr_Ident{
+ Name: name,
+ },
+ },
+ }, nil
+}
+
+func protoList(id int64, list ListExpr) (*exprpb.Expr, error) {
+ var err error
+ elems := make([]*exprpb.Expr, list.Size())
+ for i, e := range list.Elements() {
+ elems[i], err = ExprToProto(e)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return &exprpb.Expr{
+ Id: id,
+ ExprKind: &exprpb.Expr_ListExpr{
+ ListExpr: &exprpb.Expr_CreateList{
+ Elements: elems,
+ OptionalIndices: list.OptionalIndices(),
+ },
+ },
+ }, nil
+}
+
+func protoLiteral(id int64, val ref.Val) (*exprpb.Expr, error) {
+ c, err := ValToConstant(val)
+ if err != nil {
+ return nil, err
+ }
+ return &exprpb.Expr{
+ Id: id,
+ ExprKind: &exprpb.Expr_ConstExpr{
+ ConstExpr: c,
+ },
+ }, nil
+}
+
+func protoMap(id int64, m MapExpr) (*exprpb.Expr, error) {
+ entries := make([]*exprpb.Expr_CreateStruct_Entry, len(m.Entries()))
+ var err error
+ for i, e := range m.Entries() {
+ entries[i], err = EntryExprToProto(e)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return &exprpb.Expr{
+ Id: id,
+ ExprKind: &exprpb.Expr_StructExpr{
+ StructExpr: &exprpb.Expr_CreateStruct{
+ Entries: entries,
+ },
+ },
+ }, nil
+}
+
+func protoMapEntry(id int64, e MapEntry) (*exprpb.Expr_CreateStruct_Entry, error) {
+ k, err := ExprToProto(e.Key())
+ if err != nil {
+ return nil, err
+ }
+ v, err := ExprToProto(e.Value())
+ if err != nil {
+ return nil, err
+ }
+ return &exprpb.Expr_CreateStruct_Entry{
+ Id: id,
+ KeyKind: &exprpb.Expr_CreateStruct_Entry_MapKey{
+ MapKey: k,
+ },
+ Value: v,
+ OptionalEntry: e.IsOptional(),
+ }, nil
+}
+
+func protoSelect(id int64, s SelectExpr) (*exprpb.Expr, error) {
+ op, err := ExprToProto(s.Operand())
+ if err != nil {
+ return nil, err
+ }
+ return &exprpb.Expr{
+ Id: id,
+ ExprKind: &exprpb.Expr_SelectExpr{
+ SelectExpr: &exprpb.Expr_Select{
+ Operand: op,
+ Field: s.FieldName(),
+ TestOnly: s.IsTestOnly(),
+ },
+ },
+ }, nil
+}
+
+func protoStruct(id int64, s StructExpr) (*exprpb.Expr, error) {
+ entries := make([]*exprpb.Expr_CreateStruct_Entry, len(s.Fields()))
+ var err error
+ for i, e := range s.Fields() {
+ entries[i], err = EntryExprToProto(e)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return &exprpb.Expr{
+ Id: id,
+ ExprKind: &exprpb.Expr_StructExpr{
+ StructExpr: &exprpb.Expr_CreateStruct{
+ MessageName: s.TypeName(),
+ Entries: entries,
+ },
+ },
+ }, nil
+}
+
+func protoStructField(id int64, f StructField) (*exprpb.Expr_CreateStruct_Entry, error) {
+ v, err := ExprToProto(f.Value())
+ if err != nil {
+ return nil, err
+ }
+ return &exprpb.Expr_CreateStruct_Entry{
+ Id: id,
+ KeyKind: &exprpb.Expr_CreateStruct_Entry_FieldKey{
+ FieldKey: f.Name(),
+ },
+ Value: v,
+ OptionalEntry: f.IsOptional(),
+ }, nil
+}
+
+// SourceInfoToProto serializes an ast.SourceInfo value to a protobuf SourceInfo object.
+func SourceInfoToProto(info *SourceInfo) (*exprpb.SourceInfo, error) {
+ if info == nil {
+ return &exprpb.SourceInfo{}, nil
+ }
+ sourceInfo := &exprpb.SourceInfo{
+ SyntaxVersion: info.SyntaxVersion(),
+ Location: info.Description(),
+ LineOffsets: info.LineOffsets(),
+ Positions: make(map[int64]int32, len(info.OffsetRanges())),
+ MacroCalls: make(map[int64]*exprpb.Expr, len(info.MacroCalls())),
+ }
+ for id, offset := range info.OffsetRanges() {
+ sourceInfo.Positions[id] = offset.Start
+ }
+ for id, e := range info.MacroCalls() {
+ call, err := ExprToProto(e)
+ if err != nil {
+ return nil, err
+ }
+ sourceInfo.MacroCalls[id] = call
+ }
+ return sourceInfo, nil
+}
+
+// ProtoToSourceInfo deserializes the protobuf into a native SourceInfo value.
+func ProtoToSourceInfo(info *exprpb.SourceInfo) (*SourceInfo, error) {
+ sourceInfo := &SourceInfo{
+ syntax: info.GetSyntaxVersion(),
+ desc: info.GetLocation(),
+ lines: info.GetLineOffsets(),
+ offsetRanges: make(map[int64]OffsetRange, len(info.GetPositions())),
+ macroCalls: make(map[int64]Expr, len(info.GetMacroCalls())),
+ }
+ for id, offset := range info.GetPositions() {
+ sourceInfo.SetOffsetRange(id, OffsetRange{Start: offset, Stop: offset})
+ }
+ for id, e := range info.GetMacroCalls() {
+ call, err := ProtoToExpr(e)
+ if err != nil {
+ return nil, err
+ }
+ sourceInfo.SetMacroCall(id, call)
+ }
+ return sourceInfo, nil
+}
+
+// ReferenceInfoToProto converts a ReferenceInfo instance to a protobuf Reference suitable for serialization.
+func ReferenceInfoToProto(info *ReferenceInfo) (*exprpb.Reference, error) {
+ c, err := ValToConstant(info.Value)
+ if err != nil {
+ return nil, err
+ }
+ return &exprpb.Reference{
+ Name: info.Name,
+ OverloadId: info.OverloadIDs,
+ Value: c,
+ }, nil
+}
+
+// ProtoToReferenceInfo converts a protobuf Reference into a CEL-native ReferenceInfo instance.
+func ProtoToReferenceInfo(ref *exprpb.Reference) (*ReferenceInfo, error) {
+ v, err := ConstantToVal(ref.GetValue())
+ if err != nil {
+ return nil, err
+ }
+ return &ReferenceInfo{
+ Name: ref.GetName(),
+ OverloadIDs: ref.GetOverloadId(),
+ Value: v,
+ }, nil
+}
+
+// ValToConstant converts a CEL-native ref.Val to a protobuf Constant.
+//
+// Only simple scalar types are supported by this method.
+func ValToConstant(v ref.Val) (*exprpb.Constant, error) {
+ if v == nil {
+ return nil, nil
+ }
+ switch v.Type() {
+ case types.BoolType:
+ return &exprpb.Constant{ConstantKind: &exprpb.Constant_BoolValue{BoolValue: v.Value().(bool)}}, nil
+ case types.BytesType:
+ return &exprpb.Constant{ConstantKind: &exprpb.Constant_BytesValue{BytesValue: v.Value().([]byte)}}, nil
+ case types.DoubleType:
+ return &exprpb.Constant{ConstantKind: &exprpb.Constant_DoubleValue{DoubleValue: v.Value().(float64)}}, nil
+ case types.IntType:
+ return &exprpb.Constant{ConstantKind: &exprpb.Constant_Int64Value{Int64Value: v.Value().(int64)}}, nil
+ case types.NullType:
+ return &exprpb.Constant{ConstantKind: &exprpb.Constant_NullValue{NullValue: structpb.NullValue_NULL_VALUE}}, nil
+ case types.StringType:
+ return &exprpb.Constant{ConstantKind: &exprpb.Constant_StringValue{StringValue: v.Value().(string)}}, nil
+ case types.UintType:
+ return &exprpb.Constant{ConstantKind: &exprpb.Constant_Uint64Value{Uint64Value: v.Value().(uint64)}}, nil
+ }
+ return nil, fmt.Errorf("unsupported constant kind: %v", v.Type())
+}
+
+// ConstantToVal converts a protobuf Constant to a CEL-native ref.Val.
+func ConstantToVal(c *exprpb.Constant) (ref.Val, error) {
+ return AlphaProtoConstantAsVal(c)
+}
+
+// AlphaProtoConstantAsVal converts a v1alpha1.Constant protobuf to a CEL-native ref.Val.
+func AlphaProtoConstantAsVal(c *exprpb.Constant) (ref.Val, error) {
+ if c == nil {
+ return nil, nil
+ }
+ canonical := &celpb.Constant{}
+ if err := convertProto(c, canonical); err != nil {
+ return nil, err
+ }
+ return ProtoConstantAsVal(canonical)
+}
+
+// ProtoConstantAsVal converts a canonical celpb.Constant protobuf to a CEL-native ref.Val.
+func ProtoConstantAsVal(c *celpb.Constant) (ref.Val, error) {
+ switch c.GetConstantKind().(type) {
+ case *celpb.Constant_BoolValue:
+ return types.Bool(c.GetBoolValue()), nil
+ case *celpb.Constant_BytesValue:
+ return types.Bytes(c.GetBytesValue()), nil
+ case *celpb.Constant_DoubleValue:
+ return types.Double(c.GetDoubleValue()), nil
+ case *celpb.Constant_Int64Value:
+ return types.Int(c.GetInt64Value()), nil
+ case *celpb.Constant_NullValue:
+ return types.NullValue, nil
+ case *celpb.Constant_StringValue:
+ return types.String(c.GetStringValue()), nil
+ case *celpb.Constant_Uint64Value:
+ return types.Uint(c.GetUint64Value()), nil
+ }
+ return nil, fmt.Errorf("unsupported constant kind: %v", c.GetConstantKind())
+}
+
+func convertProto(src, dst proto.Message) error {
+ pb, err := proto.Marshal(src)
+ if err != nil {
+ return err
+ }
+ err = proto.Unmarshal(pb, dst)
+ return err
+}
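+
+// Illustrative sketch, not part of upstream cel-go: ValToConstant and
+// ConstantToVal are inverses for the scalar kinds handled above:
+//
+//	c, _ := ValToConstant(types.String("hello")) // Constant_StringValue
+//	v, _ := ConstantToVal(c)                     // types.String("hello")
+//	_ = v.Equal(types.String("hello"))           // types.True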
diff --git a/vendor/github.com/google/cel-go/common/ast/expr.go b/vendor/github.com/google/cel-go/common/ast/expr.go
new file mode 100644
index 000000000..9f55cb3b9
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/ast/expr.go
@@ -0,0 +1,884 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ast
+
+import (
+ "github.com/google/cel-go/common/types/ref"
+)
+
+// ExprKind represents the expression node kind.
+type ExprKind int
+
+const (
+ // UnspecifiedExprKind represents an unset expression with no specified properties.
+ UnspecifiedExprKind ExprKind = iota
+
+ // CallKind represents a function call.
+ CallKind
+
+ // ComprehensionKind represents a comprehension expression generated by a macro.
+ ComprehensionKind
+
+ // IdentKind represents a simple variable, constant, or type identifier.
+ IdentKind
+
+ // ListKind represents a list literal expression.
+ ListKind
+
+ // LiteralKind represents a primitive scalar literal.
+ LiteralKind
+
+ // MapKind represents a map literal expression.
+ MapKind
+
+ // SelectKind represents a field selection expression.
+ SelectKind
+
+ // StructKind represents a struct literal expression.
+ StructKind
+)
+
+// Expr represents the base expression node in a CEL abstract syntax tree.
+//
+// Depending on the `Kind()` value, the Expr may be converted to a concrete expression type
+// as indicated by the `As` methods.
+type Expr interface {
+ // ID of the expression as it appears in the AST
+ ID() int64
+
+ // Kind of the expression node. See ExprKind for the valid enum values.
+ Kind() ExprKind
+
+ // AsCall adapts the expr into a CallExpr
+ //
+ // The Kind() must be equal to a CallKind for the conversion to be well-defined.
+ AsCall() CallExpr
+
+ // AsComprehension adapts the expr into a ComprehensionExpr.
+ //
+ // The Kind() must be equal to a ComprehensionKind for the conversion to be well-defined.
+ AsComprehension() ComprehensionExpr
+
+ // AsIdent adapts the expr into an identifier string.
+ //
+ // The Kind() must be equal to an IdentKind for the conversion to be well-defined.
+ AsIdent() string
+
+ // AsLiteral adapts the expr into a constant ref.Val.
+ //
+ // The Kind() must be equal to a LiteralKind for the conversion to be well-defined.
+ AsLiteral() ref.Val
+
+ // AsList adapts the expr into a ListExpr.
+ //
+ // The Kind() must be equal to a ListKind for the conversion to be well-defined.
+ AsList() ListExpr
+
+ // AsMap adapts the expr into a MapExpr.
+ //
+ // The Kind() must be equal to a MapKind for the conversion to be well-defined.
+ AsMap() MapExpr
+
+ // AsSelect adapts the expr into a SelectExpr.
+ //
+ // The Kind() must be equal to a SelectKind for the conversion to be well-defined.
+ AsSelect() SelectExpr
+
+ // AsStruct adapts the expr into a StructExpr.
+ //
+ // The Kind() must be equal to a StructKind for the conversion to be well-defined.
+ AsStruct() StructExpr
+
+ // RenumberIDs performs an in-place update of the expression and all of its descendants' numeric ids.
+ RenumberIDs(IDGenerator)
+
+ // SetKindCase replaces the contents of the current expression with the contents of the other.
+ //
+ // SetKindCase takes ownership of any expression instances referenced within the input Expr.
+ // A shallow copy is made of the Expr value itself, but not a deep one.
+ //
+ // This method should only be used during AST rewrites using temporary Expr values.
+ SetKindCase(Expr)
+
+ // isExpr is a marker interface.
+ isExpr()
+}
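+
+// Illustrative sketch, not part of upstream cel-go: callers switch on Kind()
+// before invoking the matching conversion; on a kind mismatch the As* methods
+// return nil or sentinel values rather than panicking:
+//
+//	switch e.Kind() {
+//	case CallKind:
+//		_ = e.AsCall().FunctionName()
+//	case LiteralKind:
+//		_ = e.AsLiteral()
+//	}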
+
+// EntryExprKind represents the possible EntryExpr kinds.
+type EntryExprKind int
+
+const (
+ // UnspecifiedEntryExprKind indicates that the entry expr is not set.
+ UnspecifiedEntryExprKind EntryExprKind = iota
+
+ // MapEntryKind indicates that the entry is a MapEntry type with key and value expressions.
+ MapEntryKind
+
+ // StructFieldKind indicates that the entry is a StructField with a field name and initializer
+ // expression.
+ StructFieldKind
+)
+
+// EntryExpr represents the base entry expression in a CEL map or struct literal.
+type EntryExpr interface {
+ // ID of the entry as it appears in the AST.
+ ID() int64
+
+ // Kind of the entry expression node. See EntryExprKind for valid enum values.
+ Kind() EntryExprKind
+
+ // AsMapEntry casts the EntryExpr to a MapEntry.
+ //
+ // The Kind() must be equal to MapEntryKind for the conversion to be well-defined.
+ AsMapEntry() MapEntry
+
+ // AsStructField casts the EntryExpr to a StructField
+ //
+ // The Kind() must be equal to StructFieldKind for the conversion to be well-defined.
+ AsStructField() StructField
+
+ // RenumberIDs performs an in-place update of the expression and all of its descendants' numeric ids.
+ RenumberIDs(IDGenerator)
+
+ isEntryExpr()
+}
+
+// IDGenerator produces unique ids suitable for tagging expression nodes
+type IDGenerator func(originalID int64) int64
+
+// CallExpr defines an interface for inspecting a function call and its arguments.
+type CallExpr interface {
+ // FunctionName returns the name of the function.
+ FunctionName() string
+
+ // IsMemberFunction returns whether the call has a non-nil target, indicating it is a member function.
+ IsMemberFunction() bool
+
+ // Target returns the target of the expression if one is present.
+ Target() Expr
+
+ // Args returns the list of call arguments, excluding the target.
+ Args() []Expr
+
+ // marker interface method
+ isExpr()
+}
+
+// ListExpr defines an interface for inspecting a list literal expression.
+type ListExpr interface {
+ // Elements returns the list elements as navigable expressions.
+ Elements() []Expr
+
+ // OptionalIndices returns the list of optional indices in the list literal.
+ OptionalIndices() []int32
+
+ // IsOptional indicates whether the given element index is optional.
+ IsOptional(int32) bool
+
+ // Size returns the number of elements in the list.
+ Size() int
+
+ // marker interface method
+ isExpr()
+}
+
+// SelectExpr defines an interface for inspecting a select expression.
+type SelectExpr interface {
+ // Operand returns the selection operand expression.
+ Operand() Expr
+
+ // FieldName returns the field name being selected from the operand.
+ FieldName() string
+
+ // IsTestOnly indicates whether the select expression is a presence test generated by a macro.
+ IsTestOnly() bool
+
+ // marker interface method
+ isExpr()
+}
+
+// MapExpr defines an interface for inspecting a map expression.
+type MapExpr interface {
+ // Entries returns the map key value pairs as EntryExpr values.
+ Entries() []EntryExpr
+
+ // Size returns the number of entries in the map.
+ Size() int
+
+ // marker interface method
+ isExpr()
+}
+
+// MapEntry defines an interface for inspecting a map entry.
+type MapEntry interface {
+ // Key returns the map entry key expression.
+ Key() Expr
+
+ // Value returns the map entry value expression.
+ Value() Expr
+
+ // IsOptional returns whether the entry is optional.
+ IsOptional() bool
+
+ // marker interface method
+ isEntryExpr()
+}
+
+// StructExpr defines an interface for inspecting a struct and its field initializers.
+type StructExpr interface {
+ // TypeName returns the struct type name.
+ TypeName() string
+
+ // Fields returns the set of field initializers in the struct expression as EntryExpr values.
+ Fields() []EntryExpr
+
+ // marker interface method
+ isExpr()
+}
+
+// StructField defines an interface for inspecting a struct field initialization.
+type StructField interface {
+ // Name returns the name of the field.
+ Name() string
+
+ // Value returns the field initialization expression.
+ Value() Expr
+
+ // IsOptional returns whether the field is optional.
+ IsOptional() bool
+
+ // marker interface method
+ isEntryExpr()
+}
+
+// ComprehensionExpr defines an interface for inspecting a comprehension expression.
+type ComprehensionExpr interface {
+ // IterRange returns the iteration range expression.
+ IterRange() Expr
+
+ // IterVar returns the iteration variable name.
+ //
+ // For one-variable comprehensions, the iter var refers to the element value
+ // when iterating over a list, or the map key when iterating over a map.
+ //
+ // For two-variable comprehensions, the iter var refers to the list index or the
+ // map key.
+ IterVar() string
+
+ // IterVar2 returns the second iteration variable name.
+ //
+ // When the value is non-empty, the comprehension is a two-variable comprehension.
+ IterVar2() string
+
+ // HasIterVar2 returns true if the second iteration variable is non-empty.
+ HasIterVar2() bool
+
+ // AccuVar returns the accumulation variable name.
+ AccuVar() string
+
+ // AccuInit returns the accumulation variable initialization expression.
+ AccuInit() Expr
+
+ // LoopCondition returns the loop condition expression.
+ LoopCondition() Expr
+
+ // LoopStep returns the loop step expression.
+ LoopStep() Expr
+
+ // Result returns the comprehension result expression.
+ Result() Expr
+
+ // marker interface method
+ isExpr()
+}
+
+var _ Expr = &expr{}
+
+type expr struct {
+ id int64
+ exprKindCase
+}
+
+type exprKindCase interface {
+ Kind() ExprKind
+
+ renumberIDs(IDGenerator)
+
+ isExpr()
+}
+
+func (e *expr) ID() int64 {
+ if e == nil {
+ return 0
+ }
+ return e.id
+}
+
+func (e *expr) Kind() ExprKind {
+ if e == nil || e.exprKindCase == nil {
+ return UnspecifiedExprKind
+ }
+ return e.exprKindCase.Kind()
+}
+
+func (e *expr) AsCall() CallExpr {
+ if e.Kind() != CallKind {
+ return nilCall
+ }
+ return e.exprKindCase.(CallExpr)
+}
+
+func (e *expr) AsComprehension() ComprehensionExpr {
+ if e.Kind() != ComprehensionKind {
+ return nilCompre
+ }
+ return e.exprKindCase.(ComprehensionExpr)
+}
+
+func (e *expr) AsIdent() string {
+ if e.Kind() != IdentKind {
+ return ""
+ }
+ return string(e.exprKindCase.(baseIdentExpr))
+}
+
+func (e *expr) AsLiteral() ref.Val {
+ if e.Kind() != LiteralKind {
+ return nil
+ }
+ return e.exprKindCase.(*baseLiteral).Val
+}
+
+func (e *expr) AsList() ListExpr {
+ if e.Kind() != ListKind {
+ return nilList
+ }
+ return e.exprKindCase.(ListExpr)
+}
+
+func (e *expr) AsMap() MapExpr {
+ if e.Kind() != MapKind {
+ return nilMap
+ }
+ return e.exprKindCase.(MapExpr)
+}
+
+func (e *expr) AsSelect() SelectExpr {
+ if e.Kind() != SelectKind {
+ return nilSel
+ }
+ return e.exprKindCase.(SelectExpr)
+}
+
+func (e *expr) AsStruct() StructExpr {
+ if e.Kind() != StructKind {
+ return nilStruct
+ }
+ return e.exprKindCase.(StructExpr)
+}
+
+func (e *expr) SetKindCase(other Expr) {
+ if e == nil {
+ return
+ }
+ if other == nil {
+ e.exprKindCase = nil
+ return
+ }
+ switch other.Kind() {
+ case CallKind:
+ c := other.AsCall()
+ e.exprKindCase = &baseCallExpr{
+ function: c.FunctionName(),
+ target: c.Target(),
+ args: c.Args(),
+ isMember: c.IsMemberFunction(),
+ }
+ case ComprehensionKind:
+ c := other.AsComprehension()
+ e.exprKindCase = &baseComprehensionExpr{
+ iterRange: c.IterRange(),
+ iterVar: c.IterVar(),
+ iterVar2: c.IterVar2(),
+ accuVar: c.AccuVar(),
+ accuInit: c.AccuInit(),
+ loopCond: c.LoopCondition(),
+ loopStep: c.LoopStep(),
+ result: c.Result(),
+ }
+ case IdentKind:
+ e.exprKindCase = baseIdentExpr(other.AsIdent())
+ case ListKind:
+ l := other.AsList()
+ optIndexMap := make(map[int32]struct{}, len(l.OptionalIndices()))
+ for _, idx := range l.OptionalIndices() {
+ optIndexMap[idx] = struct{}{}
+ }
+ e.exprKindCase = &baseListExpr{
+ elements: l.Elements(),
+ optIndices: l.OptionalIndices(),
+ optIndexMap: optIndexMap,
+ }
+ case LiteralKind:
+ e.exprKindCase = &baseLiteral{Val: other.AsLiteral()}
+ case MapKind:
+ e.exprKindCase = &baseMapExpr{
+ entries: other.AsMap().Entries(),
+ }
+ case SelectKind:
+ s := other.AsSelect()
+ e.exprKindCase = &baseSelectExpr{
+ operand: s.Operand(),
+ field: s.FieldName(),
+ testOnly: s.IsTestOnly(),
+ }
+ case StructKind:
+ s := other.AsStruct()
+ e.exprKindCase = &baseStructExpr{
+ typeName: s.TypeName(),
+ fields: s.Fields(),
+ }
+ case UnspecifiedExprKind:
+ e.exprKindCase = nil
+ }
+}
+
+func (e *expr) RenumberIDs(idGen IDGenerator) {
+ if e == nil {
+ return
+ }
+ e.id = idGen(e.id)
+ if e.exprKindCase != nil {
+ e.exprKindCase.renumberIDs(idGen)
+ }
+}
+
+type baseCallExpr struct {
+ function string
+ target Expr
+ args []Expr
+ isMember bool
+}
+
+func (*baseCallExpr) Kind() ExprKind {
+ return CallKind
+}
+
+func (e *baseCallExpr) FunctionName() string {
+ if e == nil {
+ return ""
+ }
+ return e.function
+}
+
+func (e *baseCallExpr) IsMemberFunction() bool {
+ if e == nil {
+ return false
+ }
+ return e.isMember
+}
+
+func (e *baseCallExpr) Target() Expr {
+ if e == nil || !e.IsMemberFunction() {
+ return nilExpr
+ }
+ return e.target
+}
+
+func (e *baseCallExpr) Args() []Expr {
+ if e == nil {
+ return []Expr{}
+ }
+ return e.args
+}
+
+func (e *baseCallExpr) renumberIDs(idGen IDGenerator) {
+ if e.IsMemberFunction() {
+ e.Target().RenumberIDs(idGen)
+ }
+ for _, arg := range e.Args() {
+ arg.RenumberIDs(idGen)
+ }
+}
+
+func (*baseCallExpr) isExpr() {}
+
+var _ ComprehensionExpr = &baseComprehensionExpr{}
+
+type baseComprehensionExpr struct {
+ iterRange Expr
+ iterVar string
+ iterVar2 string
+ accuVar string
+ accuInit Expr
+ loopCond Expr
+ loopStep Expr
+ result Expr
+}
+
+func (*baseComprehensionExpr) Kind() ExprKind {
+ return ComprehensionKind
+}
+
+func (e *baseComprehensionExpr) IterRange() Expr {
+ if e == nil {
+ return nilExpr
+ }
+ return e.iterRange
+}
+
+func (e *baseComprehensionExpr) IterVar() string {
+ return e.iterVar
+}
+
+func (e *baseComprehensionExpr) IterVar2() string {
+ return e.iterVar2
+}
+
+func (e *baseComprehensionExpr) HasIterVar2() bool {
+ return e.iterVar2 != ""
+}
+
+func (e *baseComprehensionExpr) AccuVar() string {
+ return e.accuVar
+}
+
+func (e *baseComprehensionExpr) AccuInit() Expr {
+ if e == nil {
+ return nilExpr
+ }
+ return e.accuInit
+}
+
+func (e *baseComprehensionExpr) LoopCondition() Expr {
+ if e == nil {
+ return nilExpr
+ }
+ return e.loopCond
+}
+
+func (e *baseComprehensionExpr) LoopStep() Expr {
+ if e == nil {
+ return nilExpr
+ }
+ return e.loopStep
+}
+
+func (e *baseComprehensionExpr) Result() Expr {
+ if e == nil {
+ return nilExpr
+ }
+ return e.result
+}
+
+func (e *baseComprehensionExpr) renumberIDs(idGen IDGenerator) {
+ e.IterRange().RenumberIDs(idGen)
+ e.AccuInit().RenumberIDs(idGen)
+ e.LoopCondition().RenumberIDs(idGen)
+ e.LoopStep().RenumberIDs(idGen)
+ e.Result().RenumberIDs(idGen)
+}
+
+func (*baseComprehensionExpr) isExpr() {}
+
+var _ exprKindCase = baseIdentExpr("")
+
+type baseIdentExpr string
+
+func (baseIdentExpr) Kind() ExprKind {
+ return IdentKind
+}
+
+func (e baseIdentExpr) renumberIDs(IDGenerator) {}
+
+func (baseIdentExpr) isExpr() {}
+
+var _ exprKindCase = &baseLiteral{}
+var _ ref.Val = &baseLiteral{}
+
+type baseLiteral struct {
+ ref.Val
+}
+
+func (*baseLiteral) Kind() ExprKind {
+ return LiteralKind
+}
+
+func (l *baseLiteral) renumberIDs(IDGenerator) {}
+
+func (*baseLiteral) isExpr() {}
+
+var _ ListExpr = &baseListExpr{}
+
+type baseListExpr struct {
+ elements []Expr
+ optIndices []int32
+ optIndexMap map[int32]struct{}
+}
+
+func (*baseListExpr) Kind() ExprKind {
+ return ListKind
+}
+
+func (e *baseListExpr) Elements() []Expr {
+ if e == nil {
+ return []Expr{}
+ }
+ return e.elements
+}
+
+func (e *baseListExpr) IsOptional(index int32) bool {
+ _, found := e.optIndexMap[index]
+ return found
+}
+
+func (e *baseListExpr) OptionalIndices() []int32 {
+ if e == nil {
+ return []int32{}
+ }
+ return e.optIndices
+}
+
+func (e *baseListExpr) Size() int {
+ return len(e.Elements())
+}
+
+func (e *baseListExpr) renumberIDs(idGen IDGenerator) {
+ for _, elem := range e.Elements() {
+ elem.RenumberIDs(idGen)
+ }
+}
+
+func (*baseListExpr) isExpr() {}
+
+type baseMapExpr struct {
+ entries []EntryExpr
+}
+
+func (*baseMapExpr) Kind() ExprKind {
+ return MapKind
+}
+
+func (e *baseMapExpr) Entries() []EntryExpr {
+ if e == nil {
+ return []EntryExpr{}
+ }
+ return e.entries
+}
+
+func (e *baseMapExpr) Size() int {
+ return len(e.Entries())
+}
+
+func (e *baseMapExpr) renumberIDs(idGen IDGenerator) {
+ for _, entry := range e.Entries() {
+ entry.RenumberIDs(idGen)
+ }
+}
+
+func (*baseMapExpr) isExpr() {}
+
+type baseSelectExpr struct {
+ operand Expr
+ field string
+ testOnly bool
+}
+
+func (*baseSelectExpr) Kind() ExprKind {
+ return SelectKind
+}
+
+func (e *baseSelectExpr) Operand() Expr {
+ if e == nil || e.operand == nil {
+ return nilExpr
+ }
+ return e.operand
+}
+
+func (e *baseSelectExpr) FieldName() string {
+ if e == nil {
+ return ""
+ }
+ return e.field
+}
+
+func (e *baseSelectExpr) IsTestOnly() bool {
+ if e == nil {
+ return false
+ }
+ return e.testOnly
+}
+
+func (e *baseSelectExpr) renumberIDs(idGen IDGenerator) {
+ e.Operand().RenumberIDs(idGen)
+}
+
+func (*baseSelectExpr) isExpr() {}
+
+type baseStructExpr struct {
+ typeName string
+ fields []EntryExpr
+}
+
+func (*baseStructExpr) Kind() ExprKind {
+ return StructKind
+}
+
+func (e *baseStructExpr) TypeName() string {
+ if e == nil {
+ return ""
+ }
+ return e.typeName
+}
+
+func (e *baseStructExpr) Fields() []EntryExpr {
+ if e == nil {
+ return []EntryExpr{}
+ }
+ return e.fields
+}
+
+func (e *baseStructExpr) renumberIDs(idGen IDGenerator) {
+ for _, f := range e.Fields() {
+ f.RenumberIDs(idGen)
+ }
+}
+
+func (*baseStructExpr) isExpr() {}
+
+type entryExprKindCase interface {
+ Kind() EntryExprKind
+
+ renumberIDs(IDGenerator)
+
+ isEntryExpr()
+}
+
+var _ EntryExpr = &entryExpr{}
+
+type entryExpr struct {
+ id int64
+ entryExprKindCase
+}
+
+func (e *entryExpr) ID() int64 {
+ return e.id
+}
+
+func (e *entryExpr) AsMapEntry() MapEntry {
+ if e.Kind() != MapEntryKind {
+ return nilMapEntry
+ }
+ return e.entryExprKindCase.(MapEntry)
+}
+
+func (e *entryExpr) AsStructField() StructField {
+ if e.Kind() != StructFieldKind {
+ return nilStructField
+ }
+ return e.entryExprKindCase.(StructField)
+}
+
+func (e *entryExpr) RenumberIDs(idGen IDGenerator) {
+ e.id = idGen(e.id)
+ e.entryExprKindCase.renumberIDs(idGen)
+}
+
+type baseMapEntry struct {
+ key Expr
+ value Expr
+ isOptional bool
+}
+
+func (e *baseMapEntry) Kind() EntryExprKind {
+ return MapEntryKind
+}
+
+func (e *baseMapEntry) Key() Expr {
+ if e == nil {
+ return nilExpr
+ }
+ return e.key
+}
+
+func (e *baseMapEntry) Value() Expr {
+ if e == nil {
+ return nilExpr
+ }
+ return e.value
+}
+
+func (e *baseMapEntry) IsOptional() bool {
+ if e == nil {
+ return false
+ }
+ return e.isOptional
+}
+
+func (e *baseMapEntry) renumberIDs(idGen IDGenerator) {
+ e.Key().RenumberIDs(idGen)
+ e.Value().RenumberIDs(idGen)
+}
+
+func (*baseMapEntry) isEntryExpr() {}
+
+type baseStructField struct {
+ field string
+ value Expr
+ isOptional bool
+}
+
+func (f *baseStructField) Kind() EntryExprKind {
+ return StructFieldKind
+}
+
+func (f *baseStructField) Name() string {
+ if f == nil {
+ return ""
+ }
+ return f.field
+}
+
+func (f *baseStructField) Value() Expr {
+ if f == nil {
+ return nilExpr
+ }
+ return f.value
+}
+
+func (f *baseStructField) IsOptional() bool {
+ if f == nil {
+ return false
+ }
+ return f.isOptional
+}
+
+func (f *baseStructField) renumberIDs(idGen IDGenerator) {
+ f.Value().RenumberIDs(idGen)
+}
+
+func (*baseStructField) isEntryExpr() {}
+
+var (
+ nilExpr *expr = nil
+ nilCall *baseCallExpr = nil
+ nilCompre *baseComprehensionExpr = nil
+ nilList *baseListExpr = nil
+ nilMap *baseMapExpr = nil
+ nilMapEntry *baseMapEntry = nil
+ nilSel *baseSelectExpr = nil
+ nilStruct *baseStructExpr = nil
+ nilStructField *baseStructField = nil
+)
diff --git a/vendor/github.com/google/cel-go/common/ast/factory.go b/vendor/github.com/google/cel-go/common/ast/factory.go
new file mode 100644
index 000000000..994806b79
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/ast/factory.go
@@ -0,0 +1,313 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ast
+
+import "github.com/google/cel-go/common/types/ref"
+
+// ExprFactory defines a set of methods necessary for building native expression values.
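+//
+// A minimal illustrative sketch of factory usage (the node IDs are arbitrary):
+//
+//	fac := NewExprFactory()
+//	arg := fac.NewIdent(1, "x")
+//	call := fac.NewCall(2, "size", arg) // represents the expression size(x)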
+type ExprFactory interface {
+ // CopyExpr creates a deep copy of the input Expr value.
+ CopyExpr(Expr) Expr
+
+ // CopyEntryExpr creates a deep copy of the input EntryExpr value.
+ CopyEntryExpr(EntryExpr) EntryExpr
+
+ // NewCall creates an Expr value representing a global function call.
+ NewCall(id int64, function string, args ...Expr) Expr
+
+ // NewComprehension creates an Expr value representing a one-variable comprehension over a value range.
+ NewComprehension(id int64, iterRange Expr, iterVar, accuVar string, accuInit, loopCondition, loopStep, result Expr) Expr
+
+ // NewComprehensionTwoVar creates an Expr value representing a two-variable comprehension over a value range.
+ NewComprehensionTwoVar(id int64, iterRange Expr, iterVar, iterVar2, accuVar string, accuInit, loopCondition, loopStep, result Expr) Expr
+
+ // NewMemberCall creates an Expr value representing a member function call.
+ NewMemberCall(id int64, function string, receiver Expr, args ...Expr) Expr
+
+ // NewIdent creates an Expr value representing an identifier.
+ NewIdent(id int64, name string) Expr
+
+ // NewAccuIdent creates an Expr value representing an accumulator identifier within a
+ // comprehension.
+ NewAccuIdent(id int64) Expr
+
+ // NewLiteral creates an Expr value representing a literal value, such as a string or integer.
+ NewLiteral(id int64, value ref.Val) Expr
+
+ // NewList creates an Expr value representing a list literal expression with optional indices.
+ //
+ // Optional indices will typically be empty unless the CEL optional types are enabled.
+ NewList(id int64, elems []Expr, optIndices []int32) Expr
+
+ // NewMap creates an Expr value representing a map literal expression.
+ NewMap(id int64, entries []EntryExpr) Expr
+
+ // NewMapEntry creates a MapEntry with a given key, value, and a flag indicating whether
+ // the key is optionally set.
+ NewMapEntry(id int64, key, value Expr, isOptional bool) EntryExpr
+
+ // NewPresenceTest creates an Expr representing a field presence test on an operand expression.
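+ // For example, the CEL macro has(msg.field) is represented as a presence test
+ // on the operand msg with the field name "field".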
+ NewPresenceTest(id int64, operand Expr, field string) Expr
+
+ // NewSelect creates an Expr representing a field selection on an operand expression.
+ NewSelect(id int64, operand Expr, field string) Expr
+
+ // NewStruct creates an Expr value representing a struct literal with a given type name and a
+ // set of field initializers.
+ NewStruct(id int64, typeName string, fields []EntryExpr) Expr
+
+ // NewStructField creates a StructField with a given field name, value, and a flag indicating
+ // whether the field is optionally set.
+ NewStructField(id int64, field string, value Expr, isOptional bool) EntryExpr
+
+ // NewUnspecifiedExpr creates an empty expression node.
+ NewUnspecifiedExpr(id int64) Expr
+
+ isExprFactory()
+}
+
+type baseExprFactory struct{}
+
+// NewExprFactory creates an ExprFactory instance.
+func NewExprFactory() ExprFactory {
+ return &baseExprFactory{}
+}
+
+func (fac *baseExprFactory) NewCall(id int64, function string, args ...Expr) Expr {
+ if len(args) == 0 {
+ args = []Expr{}
+ }
+ return fac.newExpr(
+ id,
+ &baseCallExpr{
+ function: function,
+ target: nilExpr,
+ args: args,
+ isMember: false,
+ })
+}
+
+func (fac *baseExprFactory) NewMemberCall(id int64, function string, target Expr, args ...Expr) Expr {
+ if len(args) == 0 {
+ args = []Expr{}
+ }
+ return fac.newExpr(
+ id,
+ &baseCallExpr{
+ function: function,
+ target: target,
+ args: args,
+ isMember: true,
+ })
+}
+
+func (fac *baseExprFactory) NewComprehension(id int64, iterRange Expr, iterVar, accuVar string, accuInit, loopCond, loopStep, result Expr) Expr {
+ // Set iterVar2 to the empty string to indicate that the second variable is omitted.
+ return fac.NewComprehensionTwoVar(id, iterRange, iterVar, "", accuVar, accuInit, loopCond, loopStep, result)
+}
+
+func (fac *baseExprFactory) NewComprehensionTwoVar(id int64, iterRange Expr, iterVar, iterVar2, accuVar string, accuInit, loopCond, loopStep, result Expr) Expr {
+ return fac.newExpr(
+ id,
+ &baseComprehensionExpr{
+ iterRange: iterRange,
+ iterVar: iterVar,
+ iterVar2: iterVar2,
+ accuVar: accuVar,
+ accuInit: accuInit,
+ loopCond: loopCond,
+ loopStep: loopStep,
+ result: result,
+ })
+}
+
+func (fac *baseExprFactory) NewIdent(id int64, name string) Expr {
+ return fac.newExpr(id, baseIdentExpr(name))
+}
+
+func (fac *baseExprFactory) NewAccuIdent(id int64) Expr {
+ return fac.NewIdent(id, "__result__")
+}
+
+func (fac *baseExprFactory) NewLiteral(id int64, value ref.Val) Expr {
+ return fac.newExpr(id, &baseLiteral{Val: value})
+}
+
+func (fac *baseExprFactory) NewList(id int64, elems []Expr, optIndices []int32) Expr {
+ optIndexMap := make(map[int32]struct{}, len(optIndices))
+ for _, idx := range optIndices {
+ optIndexMap[idx] = struct{}{}
+ }
+ return fac.newExpr(id,
+ &baseListExpr{
+ elements: elems,
+ optIndices: optIndices,
+ optIndexMap: optIndexMap,
+ })
+}
+
+func (fac *baseExprFactory) NewMap(id int64, entries []EntryExpr) Expr {
+ return fac.newExpr(id, &baseMapExpr{entries: entries})
+}
+
+func (fac *baseExprFactory) NewMapEntry(id int64, key, value Expr, isOptional bool) EntryExpr {
+ return fac.newEntryExpr(
+ id,
+ &baseMapEntry{
+ key: key,
+ value: value,
+ isOptional: isOptional,
+ })
+}
+
+func (fac *baseExprFactory) NewPresenceTest(id int64, operand Expr, field string) Expr {
+ return fac.newExpr(
+ id,
+ &baseSelectExpr{
+ operand: operand,
+ field: field,
+ testOnly: true,
+ })
+}
+
+func (fac *baseExprFactory) NewSelect(id int64, operand Expr, field string) Expr {
+ return fac.newExpr(
+ id,
+ &baseSelectExpr{
+ operand: operand,
+ field: field,
+ })
+}
+
+func (fac *baseExprFactory) NewStruct(id int64, typeName string, fields []EntryExpr) Expr {
+ return fac.newExpr(
+ id,
+ &baseStructExpr{
+ typeName: typeName,
+ fields: fields,
+ })
+}
+
+func (fac *baseExprFactory) NewStructField(id int64, field string, value Expr, isOptional bool) EntryExpr {
+ return fac.newEntryExpr(
+ id,
+ &baseStructField{
+ field: field,
+ value: value,
+ isOptional: isOptional,
+ })
+}
+
+func (fac *baseExprFactory) NewUnspecifiedExpr(id int64) Expr {
+ return fac.newExpr(id, nil)
+}
+
+func (fac *baseExprFactory) CopyExpr(e Expr) Expr {
+ // Unwrap navigable expressions to avoid unnecessary allocations during copying.
+ if nav, ok := e.(*navigableExprImpl); ok {
+ e = nav.Expr
+ }
+ switch e.Kind() {
+ case CallKind:
+ c := e.AsCall()
+ argsCopy := make([]Expr, len(c.Args()))
+ for i, arg := range c.Args() {
+ argsCopy[i] = fac.CopyExpr(arg)
+ }
+ if !c.IsMemberFunction() {
+ return fac.NewCall(e.ID(), c.FunctionName(), argsCopy...)
+ }
+ return fac.NewMemberCall(e.ID(), c.FunctionName(), fac.CopyExpr(c.Target()), argsCopy...)
+ case ComprehensionKind:
+ compre := e.AsComprehension()
+ return fac.NewComprehensionTwoVar(e.ID(),
+ fac.CopyExpr(compre.IterRange()),
+ compre.IterVar(),
+ compre.IterVar2(),
+ compre.AccuVar(),
+ fac.CopyExpr(compre.AccuInit()),
+ fac.CopyExpr(compre.LoopCondition()),
+ fac.CopyExpr(compre.LoopStep()),
+ fac.CopyExpr(compre.Result()))
+ case IdentKind:
+ return fac.NewIdent(e.ID(), e.AsIdent())
+ case ListKind:
+ l := e.AsList()
+ elemsCopy := make([]Expr, l.Size())
+ for i, elem := range l.Elements() {
+ elemsCopy[i] = fac.CopyExpr(elem)
+ }
+ return fac.NewList(e.ID(), elemsCopy, l.OptionalIndices())
+ case LiteralKind:
+ return fac.NewLiteral(e.ID(), e.AsLiteral())
+ case MapKind:
+ m := e.AsMap()
+ entriesCopy := make([]EntryExpr, m.Size())
+ for i, entry := range m.Entries() {
+ entriesCopy[i] = fac.CopyEntryExpr(entry)
+ }
+ return fac.NewMap(e.ID(), entriesCopy)
+ case SelectKind:
+ s := e.AsSelect()
+ if s.IsTestOnly() {
+ return fac.NewPresenceTest(e.ID(), fac.CopyExpr(s.Operand()), s.FieldName())
+ }
+ return fac.NewSelect(e.ID(), fac.CopyExpr(s.Operand()), s.FieldName())
+ case StructKind:
+ s := e.AsStruct()
+ fieldsCopy := make([]EntryExpr, len(s.Fields()))
+ for i, field := range s.Fields() {
+ fieldsCopy[i] = fac.CopyEntryExpr(field)
+ }
+ return fac.NewStruct(e.ID(), s.TypeName(), fieldsCopy)
+ default:
+ return fac.NewUnspecifiedExpr(e.ID())
+ }
+}
+
+func (fac *baseExprFactory) CopyEntryExpr(e EntryExpr) EntryExpr {
+ switch e.Kind() {
+ case MapEntryKind:
+ entry := e.AsMapEntry()
+ return fac.NewMapEntry(e.ID(),
+ fac.CopyExpr(entry.Key()), fac.CopyExpr(entry.Value()), entry.IsOptional())
+ case StructFieldKind:
+ field := e.AsStructField()
+ return fac.NewStructField(e.ID(),
+ field.Name(), fac.CopyExpr(field.Value()), field.IsOptional())
+ default:
+ return fac.newEntryExpr(e.ID(), nil)
+ }
+}
+
+func (*baseExprFactory) isExprFactory() {}
+
+func (fac *baseExprFactory) newExpr(id int64, e exprKindCase) Expr {
+ return &expr{
+ id: id,
+ exprKindCase: e,
+ }
+}
+
+func (fac *baseExprFactory) newEntryExpr(id int64, e entryExprKindCase) EntryExpr {
+ return &entryExpr{
+ id: id,
+ entryExprKindCase: e,
+ }
+}
+
+var (
+ defaultFactory = &baseExprFactory{}
+)
diff --git a/vendor/github.com/google/cel-go/common/ast/navigable.go b/vendor/github.com/google/cel-go/common/ast/navigable.go
new file mode 100644
index 000000000..d7a90fb7c
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/ast/navigable.go
@@ -0,0 +1,660 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ast
+
+import (
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+)
+
+// NavigableExpr represents the base navigable expression value with methods to inspect the
+// parent and child expressions.
+type NavigableExpr interface {
+ Expr
+
+ // Type of the expression.
+ //
+ // If the expression is type-checked, the type check metadata is returned. If the expression
+ // has not been type-checked, the types.DynType value is returned.
+ Type() *types.Type
+
+ // Parent returns the parent expression node, if one exists.
+ Parent() (NavigableExpr, bool)
+
+ // Children returns a list of child expression nodes.
+ Children() []NavigableExpr
+
+ // Depth indicates the depth in the expression tree.
+ //
+ // The root expression has depth 0.
+ Depth() int
+}
+
+// NavigateAST converts an AST to a NavigableExpr.
+func NavigateAST(ast *AST) NavigableExpr {
+ return NavigateExpr(ast, ast.Expr())
+}
+
+// NavigateExpr creates a NavigableExpr whose type information is backed by the input AST.
+//
+// If the expression is already a NavigableExpr, the parent and depth information will be
+// propagated on the new NavigableExpr value; otherwise, the expr value will be treated
+// as though it is the root of the expression graph with a depth of 0.
+func NavigateExpr(ast *AST, expr Expr) NavigableExpr {
+ depth := 0
+ var parent NavigableExpr = nil
+ if nav, ok := expr.(NavigableExpr); ok {
+ depth = nav.Depth()
+ parent, _ = nav.Parent()
+ }
+ return newNavigableExpr(ast, parent, expr, depth)
+}
+
+// ExprMatcher takes a NavigableExpr in and indicates whether the value is a match.
+//
+// This function type should be used with the `MatchDescendants` and `MatchSubset` calls.
+type ExprMatcher func(NavigableExpr) bool
+
+// ConstantValueMatcher returns an ExprMatcher which will return true if the input NavigableExpr
+// is composed entirely of constant values, such as a simple literal, or a list or map literal of constants.
+func ConstantValueMatcher() ExprMatcher {
+ return matchIsConstantValue
+}
+
+// KindMatcher returns an ExprMatcher which will return true if the input NavigableExpr.Kind() matches
+// the specified `kind`.
+func KindMatcher(kind ExprKind) ExprMatcher {
+ return func(e NavigableExpr) bool {
+ return e.Kind() == kind
+ }
+}
+
+// FunctionMatcher returns an ExprMatcher which will match NavigableExpr nodes of CallKind type whose
+// function name is equal to `funcName`.
+func FunctionMatcher(funcName string) ExprMatcher {
+ return func(e NavigableExpr) bool {
+ if e.Kind() != CallKind {
+ return false
+ }
+ return e.AsCall().FunctionName() == funcName
+ }
+}
+
+// AllMatcher returns an ExprMatcher that matches every NavigableExpr, effectively flattening all
+// descendants into a list.
+//
+// Such a result would work well with subsequent MatchSubset calls.
+func AllMatcher() ExprMatcher {
+ return func(NavigableExpr) bool {
+ return true
+ }
+}
+
+// MatchDescendants takes a NavigableExpr and ExprMatcher and produces a list of NavigableExpr values
+// matching the input criteria in post-order (bottom up).
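+//
+// Illustrative sketch, assuming `checked` is an *AST: collect every call to a
+// function named "size" anywhere in the tree:
+//
+//	root := NavigateAST(checked)
+//	sizeCalls := MatchDescendants(root, FunctionMatcher("size"))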
+func MatchDescendants(expr NavigableExpr, matcher ExprMatcher) []NavigableExpr {
+ matches := []NavigableExpr{}
+ navVisitor := &baseVisitor{
+ visitExpr: func(e Expr) {
+ nav := e.(NavigableExpr)
+ if matcher(nav) {
+ matches = append(matches, nav)
+ }
+ },
+ }
+ visit(expr, navVisitor, postOrder, 0, 0)
+ return matches
+}
+
+// MatchSubset applies an ExprMatcher to a list of NavigableExpr values and their descendants, producing a
+// subset of NavigableExpr values which match.
+func MatchSubset(exprs []NavigableExpr, matcher ExprMatcher) []NavigableExpr {
+ matches := []NavigableExpr{}
+ navVisitor := &baseVisitor{
+ visitExpr: func(e Expr) {
+ nav := e.(NavigableExpr)
+ if matcher(nav) {
+ matches = append(matches, nav)
+ }
+ },
+ }
+ for _, expr := range exprs {
+ visit(expr, navVisitor, postOrder, 0, 1)
+ }
+ return matches
+}
+
+// Visitor defines an object for visiting Expr and EntryExpr nodes within an expression graph.
+type Visitor interface {
+ // VisitExpr visits the input expression.
+ VisitExpr(Expr)
+
+ // VisitEntryExpr visits the input entry expression, i.e. a struct field or map entry.
+ VisitEntryExpr(EntryExpr)
+}
+
+type baseVisitor struct {
+ visitExpr func(Expr)
+ visitEntryExpr func(EntryExpr)
+}
+
+// VisitExpr visits the Expr if the internal expr visitor has been configured.
+func (v *baseVisitor) VisitExpr(e Expr) {
+ if v.visitExpr != nil {
+ v.visitExpr(e)
+ }
+}
+
+// VisitEntryExpr visits the entry if the internal expr entry visitor has been configured.
+func (v *baseVisitor) VisitEntryExpr(e EntryExpr) {
+ if v.visitEntryExpr != nil {
+ v.visitEntryExpr(e)
+ }
+}
+
+// NewExprVisitor creates a visitor which only visits expression nodes.
+func NewExprVisitor(v func(Expr)) Visitor {
+ return &baseVisitor{
+ visitExpr: v,
+ visitEntryExpr: nil,
+ }
+}
+
+// PostOrderVisit walks the expression graph and calls the visitor in post-order (bottom-up).
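+//
+// Illustrative sketch: count the expression nodes reachable from an Expr `e`:
+//
+//	count := 0
+//	PostOrderVisit(e, NewExprVisitor(func(Expr) { count++ }))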
+func PostOrderVisit(expr Expr, visitor Visitor) {
+ visit(expr, visitor, postOrder, 0, 0)
+}
+
+// PreOrderVisit walks the expression graph and calls the visitor in pre-order (top-down).
+func PreOrderVisit(expr Expr, visitor Visitor) {
+ visit(expr, visitor, preOrder, 0, 0)
+}
+
+type visitOrder int
+
+const (
+ preOrder = iota + 1
+ postOrder
+)
+
+// TODO: consider exposing a way to configure a limit for the max visit depth.
+// It's possible that we could want to configure this on the NewExprVisitor()
+// and through MatchDescendants() / MaxID().
+func visit(expr Expr, visitor Visitor, order visitOrder, depth, maxDepth int) {
+ if maxDepth > 0 && depth == maxDepth {
+ return
+ }
+ if order == preOrder {
+ visitor.VisitExpr(expr)
+ }
+ switch expr.Kind() {
+ case CallKind:
+ c := expr.AsCall()
+ if c.IsMemberFunction() {
+ visit(c.Target(), visitor, order, depth+1, maxDepth)
+ }
+ for _, arg := range c.Args() {
+ visit(arg, visitor, order, depth+1, maxDepth)
+ }
+ case ComprehensionKind:
+ c := expr.AsComprehension()
+ visit(c.IterRange(), visitor, order, depth+1, maxDepth)
+ visit(c.AccuInit(), visitor, order, depth+1, maxDepth)
+ visit(c.LoopCondition(), visitor, order, depth+1, maxDepth)
+ visit(c.LoopStep(), visitor, order, depth+1, maxDepth)
+ visit(c.Result(), visitor, order, depth+1, maxDepth)
+ case ListKind:
+ l := expr.AsList()
+ for _, elem := range l.Elements() {
+ visit(elem, visitor, order, depth+1, maxDepth)
+ }
+ case MapKind:
+ m := expr.AsMap()
+ for _, e := range m.Entries() {
+ if order == preOrder {
+ visitor.VisitEntryExpr(e)
+ }
+ entry := e.AsMapEntry()
+ visit(entry.Key(), visitor, order, depth+1, maxDepth)
+ visit(entry.Value(), visitor, order, depth+1, maxDepth)
+ if order == postOrder {
+ visitor.VisitEntryExpr(e)
+ }
+ }
+ case SelectKind:
+ visit(expr.AsSelect().Operand(), visitor, order, depth+1, maxDepth)
+ case StructKind:
+ s := expr.AsStruct()
+ for _, f := range s.Fields() {
+ visitor.VisitEntryExpr(f)
+ visit(f.AsStructField().Value(), visitor, order, depth+1, maxDepth)
+ }
+ }
+ if order == postOrder {
+ visitor.VisitExpr(expr)
+ }
+}
+
+func matchIsConstantValue(e NavigableExpr) bool {
+ if e.Kind() == LiteralKind {
+ return true
+ }
+ if e.Kind() == StructKind || e.Kind() == MapKind || e.Kind() == ListKind {
+ for _, child := range e.Children() {
+ if !matchIsConstantValue(child) {
+ return false
+ }
+ }
+ return true
+ }
+ return false
+}
+
+func newNavigableExpr(ast *AST, parent NavigableExpr, expr Expr, depth int) NavigableExpr {
+ // Reduce navigable expression nesting by unwrapping the embedded Expr value.
+ if nav, ok := expr.(*navigableExprImpl); ok {
+ expr = nav.Expr
+ }
+ nav := &navigableExprImpl{
+ Expr: expr,
+ depth: depth,
+ ast: ast,
+ parent: parent,
+ createChildren: getChildFactory(expr),
+ }
+ return nav
+}
+
+type navigableExprImpl struct {
+ Expr
+ depth int
+ ast *AST
+ parent NavigableExpr
+ createChildren childFactory
+}
+
+func (nav *navigableExprImpl) Parent() (NavigableExpr, bool) {
+ if nav.parent != nil {
+ return nav.parent, true
+ }
+ return nil, false
+}
+
+func (nav *navigableExprImpl) ID() int64 {
+ return nav.Expr.ID()
+}
+
+func (nav *navigableExprImpl) Kind() ExprKind {
+ return nav.Expr.Kind()
+}
+
+func (nav *navigableExprImpl) Type() *types.Type {
+ return nav.ast.GetType(nav.ID())
+}
+
+func (nav *navigableExprImpl) Children() []NavigableExpr {
+ return nav.createChildren(nav)
+}
+
+func (nav *navigableExprImpl) Depth() int {
+ return nav.depth
+}
+
+func (nav *navigableExprImpl) AsCall() CallExpr {
+ return navigableCallImpl{navigableExprImpl: nav}
+}
+
+func (nav *navigableExprImpl) AsComprehension() ComprehensionExpr {
+ return navigableComprehensionImpl{navigableExprImpl: nav}
+}
+
+func (nav *navigableExprImpl) AsIdent() string {
+ return nav.Expr.AsIdent()
+}
+
+func (nav *navigableExprImpl) AsList() ListExpr {
+ return navigableListImpl{navigableExprImpl: nav}
+}
+
+func (nav *navigableExprImpl) AsLiteral() ref.Val {
+ return nav.Expr.AsLiteral()
+}
+
+func (nav *navigableExprImpl) AsMap() MapExpr {
+ return navigableMapImpl{navigableExprImpl: nav}
+}
+
+func (nav *navigableExprImpl) AsSelect() SelectExpr {
+ return navigableSelectImpl{navigableExprImpl: nav}
+}
+
+func (nav *navigableExprImpl) AsStruct() StructExpr {
+ return navigableStructImpl{navigableExprImpl: nav}
+}
+
+func (nav *navigableExprImpl) createChild(e Expr) NavigableExpr {
+ return newNavigableExpr(nav.ast, nav, e, nav.depth+1)
+}
+
+func (nav *navigableExprImpl) isExpr() {}
+
+type navigableCallImpl struct {
+ *navigableExprImpl
+}
+
+func (call navigableCallImpl) FunctionName() string {
+ return call.Expr.AsCall().FunctionName()
+}
+
+func (call navigableCallImpl) IsMemberFunction() bool {
+ return call.Expr.AsCall().IsMemberFunction()
+}
+
+func (call navigableCallImpl) Target() Expr {
+ t := call.Expr.AsCall().Target()
+ if t != nil {
+ return call.createChild(t)
+ }
+ return nil
+}
+
+func (call navigableCallImpl) Args() []Expr {
+ args := call.Expr.AsCall().Args()
+ navArgs := make([]Expr, len(args))
+ for i, a := range args {
+ navArgs[i] = call.createChild(a)
+ }
+ return navArgs
+}
+
+type navigableComprehensionImpl struct {
+ *navigableExprImpl
+}
+
+func (comp navigableComprehensionImpl) IterRange() Expr {
+ return comp.createChild(comp.Expr.AsComprehension().IterRange())
+}
+
+func (comp navigableComprehensionImpl) IterVar() string {
+ return comp.Expr.AsComprehension().IterVar()
+}
+
+func (comp navigableComprehensionImpl) IterVar2() string {
+ return comp.Expr.AsComprehension().IterVar2()
+}
+
+func (comp navigableComprehensionImpl) HasIterVar2() bool {
+ return comp.Expr.AsComprehension().HasIterVar2()
+}
+
+func (comp navigableComprehensionImpl) AccuVar() string {
+ return comp.Expr.AsComprehension().AccuVar()
+}
+
+func (comp navigableComprehensionImpl) AccuInit() Expr {
+ return comp.createChild(comp.Expr.AsComprehension().AccuInit())
+}
+
+func (comp navigableComprehensionImpl) LoopCondition() Expr {
+ return comp.createChild(comp.Expr.AsComprehension().LoopCondition())
+}
+
+func (comp navigableComprehensionImpl) LoopStep() Expr {
+ return comp.createChild(comp.Expr.AsComprehension().LoopStep())
+}
+
+func (comp navigableComprehensionImpl) Result() Expr {
+ return comp.createChild(comp.Expr.AsComprehension().Result())
+}
+
+type navigableListImpl struct {
+ *navigableExprImpl
+}
+
+func (l navigableListImpl) Elements() []Expr {
+ pbElems := l.Expr.AsList().Elements()
+ elems := make([]Expr, len(pbElems))
+ for i := 0; i < len(pbElems); i++ {
+ elems[i] = l.createChild(pbElems[i])
+ }
+ return elems
+}
+
+func (l navigableListImpl) IsOptional(index int32) bool {
+ return l.Expr.AsList().IsOptional(index)
+}
+
+func (l navigableListImpl) OptionalIndices() []int32 {
+ return l.Expr.AsList().OptionalIndices()
+}
+
+func (l navigableListImpl) Size() int {
+ return l.Expr.AsList().Size()
+}
+
+type navigableMapImpl struct {
+ *navigableExprImpl
+}
+
+func (m navigableMapImpl) Entries() []EntryExpr {
+ mapExpr := m.Expr.AsMap()
+ entries := make([]EntryExpr, len(mapExpr.Entries()))
+ for i, e := range mapExpr.Entries() {
+ entry := e.AsMapEntry()
+ entries[i] = &entryExpr{
+ id: e.ID(),
+ entryExprKindCase: navigableEntryImpl{
+ key: m.createChild(entry.Key()),
+ val: m.createChild(entry.Value()),
+ isOpt: entry.IsOptional(),
+ },
+ }
+ }
+ return entries
+}
+
+func (m navigableMapImpl) Size() int {
+ return m.Expr.AsMap().Size()
+}
+
+type navigableEntryImpl struct {
+ key NavigableExpr
+ val NavigableExpr
+ isOpt bool
+}
+
+func (e navigableEntryImpl) Kind() EntryExprKind {
+ return MapEntryKind
+}
+
+func (e navigableEntryImpl) Key() Expr {
+ return e.key
+}
+
+func (e navigableEntryImpl) Value() Expr {
+ return e.val
+}
+
+func (e navigableEntryImpl) IsOptional() bool {
+ return e.isOpt
+}
+
+func (e navigableEntryImpl) renumberIDs(IDGenerator) {}
+
+func (e navigableEntryImpl) isEntryExpr() {}
+
+type navigableSelectImpl struct {
+ *navigableExprImpl
+}
+
+func (sel navigableSelectImpl) FieldName() string {
+ return sel.Expr.AsSelect().FieldName()
+}
+
+func (sel navigableSelectImpl) IsTestOnly() bool {
+ return sel.Expr.AsSelect().IsTestOnly()
+}
+
+func (sel navigableSelectImpl) Operand() Expr {
+ return sel.createChild(sel.Expr.AsSelect().Operand())
+}
+
+type navigableStructImpl struct {
+ *navigableExprImpl
+}
+
+func (s navigableStructImpl) TypeName() string {
+ return s.Expr.AsStruct().TypeName()
+}
+
+func (s navigableStructImpl) Fields() []EntryExpr {
+ fieldInits := s.Expr.AsStruct().Fields()
+ fields := make([]EntryExpr, len(fieldInits))
+ for i, f := range fieldInits {
+ field := f.AsStructField()
+ fields[i] = &entryExpr{
+ id: f.ID(),
+ entryExprKindCase: navigableFieldImpl{
+ name: field.Name(),
+ val: s.createChild(field.Value()),
+ isOpt: field.IsOptional(),
+ },
+ }
+ }
+ return fields
+}
+
+type navigableFieldImpl struct {
+ name string
+ val NavigableExpr
+ isOpt bool
+}
+
+func (f navigableFieldImpl) Kind() EntryExprKind {
+ return StructFieldKind
+}
+
+func (f navigableFieldImpl) Name() string {
+ return f.name
+}
+
+func (f navigableFieldImpl) Value() Expr {
+ return f.val
+}
+
+func (f navigableFieldImpl) IsOptional() bool {
+ return f.isOpt
+}
+
+func (f navigableFieldImpl) renumberIDs(IDGenerator) {}
+
+func (f navigableFieldImpl) isEntryExpr() {}
+
+func getChildFactory(expr Expr) childFactory {
+ if expr == nil {
+ return noopFactory
+ }
+ switch expr.Kind() {
+ case LiteralKind:
+ return noopFactory
+ case IdentKind:
+ return noopFactory
+ case SelectKind:
+ return selectFactory
+ case CallKind:
+ return callArgFactory
+ case ListKind:
+ return listElemFactory
+ case MapKind:
+ return mapEntryFactory
+ case StructKind:
+ return structEntryFactory
+ case ComprehensionKind:
+ return comprehensionFactory
+ default:
+ return noopFactory
+ }
+}
+
+type childFactory func(*navigableExprImpl) []NavigableExpr
+
+func noopFactory(*navigableExprImpl) []NavigableExpr {
+ return nil
+}
+
+func selectFactory(nav *navigableExprImpl) []NavigableExpr {
+ return []NavigableExpr{nav.createChild(nav.AsSelect().Operand())}
+}
+
+func callArgFactory(nav *navigableExprImpl) []NavigableExpr {
+ call := nav.Expr.AsCall()
+ argCount := len(call.Args())
+ if call.IsMemberFunction() {
+ argCount++
+ }
+ navExprs := make([]NavigableExpr, argCount)
+ i := 0
+ if call.IsMemberFunction() {
+ navExprs[i] = nav.createChild(call.Target())
+ i++
+ }
+ for _, arg := range call.Args() {
+ navExprs[i] = nav.createChild(arg)
+ i++
+ }
+ return navExprs
+}
+
+func listElemFactory(nav *navigableExprImpl) []NavigableExpr {
+ l := nav.Expr.AsList()
+ navExprs := make([]NavigableExpr, len(l.Elements()))
+ for i, e := range l.Elements() {
+ navExprs[i] = nav.createChild(e)
+ }
+ return navExprs
+}
+
+func structEntryFactory(nav *navigableExprImpl) []NavigableExpr {
+ s := nav.Expr.AsStruct()
+ entries := make([]NavigableExpr, len(s.Fields()))
+ for i, e := range s.Fields() {
+ f := e.AsStructField()
+ entries[i] = nav.createChild(f.Value())
+ }
+ return entries
+}
+
+func mapEntryFactory(nav *navigableExprImpl) []NavigableExpr {
+ m := nav.Expr.AsMap()
+ entries := make([]NavigableExpr, len(m.Entries())*2)
+ j := 0
+ for _, e := range m.Entries() {
+ mapEntry := e.AsMapEntry()
+ entries[j] = nav.createChild(mapEntry.Key())
+ entries[j+1] = nav.createChild(mapEntry.Value())
+ j += 2
+ }
+ return entries
+}
+
+func comprehensionFactory(nav *navigableExprImpl) []NavigableExpr {
+ compre := nav.Expr.AsComprehension()
+ return []NavigableExpr{
+ nav.createChild(compre.IterRange()),
+ nav.createChild(compre.AccuInit()),
+ nav.createChild(compre.LoopCondition()),
+ nav.createChild(compre.LoopStep()),
+ nav.createChild(compre.Result()),
+ }
+}
diff --git a/vendor/github.com/google/cel-go/common/containers/BUILD.bazel b/vendor/github.com/google/cel-go/common/containers/BUILD.bazel
new file mode 100644
index 000000000..81197f064
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/containers/BUILD.bazel
@@ -0,0 +1,31 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(
+ default_visibility = ["//visibility:public"],
+ licenses = ["notice"], # Apache 2.0
+)
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "container.go",
+ ],
+ importpath = "github.com/google/cel-go/common/containers",
+ deps = [
+ "//common/ast:go_default_library",
+ ],
+)
+
+go_test(
+ name = "go_default_test",
+ size = "small",
+ srcs = [
+ "container_test.go",
+ ],
+ embed = [
+ ":go_default_library",
+ ],
+ deps = [
+ "//common/ast:go_default_library",
+ ],
+)
diff --git a/vendor/github.com/google/cel-go/common/containers/container.go b/vendor/github.com/google/cel-go/common/containers/container.go
new file mode 100644
index 000000000..3097a3f78
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/containers/container.go
@@ -0,0 +1,328 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package containers defines types and functions for resolving qualified names within a namespace
+// or type provided to CEL.
+package containers
+
+import (
+ "fmt"
+ "strings"
+ "unicode"
+
+ "github.com/google/cel-go/common/ast"
+)
+
+var (
+ // DefaultContainer has an empty container name.
+ DefaultContainer *Container = nil
+
+ // Empty map to search for aliases when needed.
+ noAliases = make(map[string]string)
+)
+
+// NewContainer creates a new Container configured by the provided ContainerOptions.
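+//
+// Illustrative sketch combining the Name and Abbrevs options defined below:
+//
+//	cont, err := NewContainer(
+//		Name("a.b.c"),
+//		Abbrevs("my.pkg.TypeName"),
+//	)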
+func NewContainer(opts ...ContainerOption) (*Container, error) {
+ var c *Container
+ var err error
+ for _, opt := range opts {
+ c, err = opt(c)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return c, nil
+}
+
+// Container holds a reference to an optional qualified container name and set of aliases.
+//
+// The program container can be used to simplify variable, function, and type specification within
+// CEL programs and behaves more or less like a C++ namespace. See ResolveCandidateNames for more
+// details.
+type Container struct {
+ name string
+ aliases map[string]string
+}
+
+// Extend creates a new Container with the existing settings and applies a series of
+// ContainerOptions to further configure the new container.
+func (c *Container) Extend(opts ...ContainerOption) (*Container, error) {
+ if c == nil {
+ return NewContainer(opts...)
+ }
+ // Copy the name and aliases of the existing container.
+ ext := &Container{name: c.Name()}
+ if len(c.aliasSet()) > 0 {
+ aliasSet := make(map[string]string, len(c.aliasSet()))
+ for k, v := range c.aliasSet() {
+ aliasSet[k] = v
+ }
+ ext.aliases = aliasSet
+ }
+ // Apply the new options to the container.
+ var err error
+ for _, opt := range opts {
+ ext, err = opt(ext)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return ext, nil
+}
+
+// Name returns the fully-qualified name of the container.
+//
+// The name may conceptually be a namespace, package, or type.
+func (c *Container) Name() string {
+ if c == nil {
+ return ""
+ }
+ return c.name
+}
+
+// ResolveCandidateNames returns the candidate names of a namespaced identifier in C++ resolution
+// order.
+//
+// Names which shadow other names are returned first. If a name includes a leading dot ('.'),
+// the name is treated as an absolute identifier which cannot be shadowed.
+//
+// Given a container name a.b.c.M.N and a type name R.s, this will deliver in order:
+//
+// a.b.c.M.N.R.s
+// a.b.c.M.R.s
+// a.b.c.R.s
+// a.b.R.s
+// a.R.s
+// R.s
+//
+// If aliases or abbreviations are configured for the container, then alias names will take
+// precedence over containerized names.
+func (c *Container) ResolveCandidateNames(name string) []string {
+ if strings.HasPrefix(name, ".") {
+ qn := name[1:]
+ alias, isAlias := c.findAlias(qn)
+ if isAlias {
+ return []string{alias}
+ }
+ return []string{qn}
+ }
+ alias, isAlias := c.findAlias(name)
+ if isAlias {
+ return []string{alias}
+ }
+ if c.Name() == "" {
+ return []string{name}
+ }
+ nextCont := c.Name()
+ candidates := []string{nextCont + "." + name}
+ for i := strings.LastIndex(nextCont, "."); i >= 0; i = strings.LastIndex(nextCont, ".") {
+ nextCont = nextCont[:i]
+ candidates = append(candidates, nextCont+"."+name)
+ }
+ return append(candidates, name)
+}
+
+// aliasSet returns the alias to fully-qualified name mapping stored in the container.
+func (c *Container) aliasSet() map[string]string {
+ if c == nil || c.aliases == nil {
+ return noAliases
+ }
+ return c.aliases
+}
+
+// findAlias takes a name as input and returns an alias expansion if one exists.
+//
+// If the name is qualified, the first component of the qualified name is checked against known
+// aliases. Any alias that is found in a qualified name is expanded in the result:
+//
+// alias: R -> my.alias.R
+// name: R.S.T
+// output: my.alias.R.S.T
+//
+// Note, the name must not have a leading dot.
+func (c *Container) findAlias(name string) (string, bool) {
+ // If an alias exists for the name, ensure it is searched last.
+ simple := name
+ qualifier := ""
+ dot := strings.Index(name, ".")
+ if dot >= 0 {
+ simple = name[0:dot]
+ qualifier = name[dot:]
+ }
+ alias, found := c.aliasSet()[simple]
+ if !found {
+ return "", false
+ }
+ return alias + qualifier, true
+}
+
+// ContainerOption specifies a functional configuration option for a Container.
+//
+// Note, ContainerOption implementations must be able to handle nil container inputs.
+type ContainerOption func(*Container) (*Container, error)
+
+// Abbrevs configures a set of simple names as abbreviations for fully-qualified names.
+//
+// An abbreviation (abbrev for short) is a simple name that expands to a fully-qualified name.
+// Abbreviations can be useful when working with variables, functions, and especially types from
+// multiple namespaces:
+//
+// // CEL object construction
+// qual.pkg.version.ObjTypeName{
+// field: alt.container.ver.FieldTypeName{value: ...}
+// }
+//
+// Only one of the qualified names above may be used as the CEL container, so at least one of these
+// references must be a long qualified name within an otherwise short CEL program. Using the
+// following abbreviations, the program becomes much simpler:
+//
+// // CEL Go option
+// Abbrevs("qual.pkg.version.ObjTypeName", "alt.container.ver.FieldTypeName")
+// // Simplified Object construction
+// ObjTypeName{field: FieldTypeName{value: ...}}
+//
+// There are a few rules for the qualified names and the simple abbreviations generated from them:
+// - Qualified names must be dot-delimited, e.g. `package.subpkg.name`.
+// - The last element in the qualified name is the abbreviation.
+// - Abbreviations must not collide with each other.
+// - The abbreviation must not collide with unqualified names in use.
+//
+// Abbreviations are distinct from container-based references in the following important ways:
+// - Abbreviations must expand to a fully-qualified name.
+// - Expanded abbreviations do not participate in namespace resolution.
+// - Abbreviation expansion is done instead of the container search for a matching identifier.
+// - Containers follow C++ namespace resolution rules with searches from the most qualified name
+// to the least qualified name.
+// - Container references within the CEL program may be relative, and are resolved to fully
+// qualified names at either type-check time or program plan time, whichever comes first.
+//
+// If there is ever a case where an identifier could be in both the container and as an
+// abbreviation, the abbreviation wins as this will ensure that the meaning of a program is
+// preserved between compilations even as the container evolves.
+func Abbrevs(qualifiedNames ...string) ContainerOption {
+ return func(c *Container) (*Container, error) {
+ for _, qn := range qualifiedNames {
+ qn = strings.TrimSpace(qn)
+ for _, r := range qn {
+ if !isIdentifierChar(r) {
+ return nil, fmt.Errorf(
+ "invalid qualified name: %s, wanted name of the form 'qualified.name'", qn)
+ }
+ }
+ ind := strings.LastIndex(qn, ".")
+ if ind <= 0 || ind >= len(qn)-1 {
+ return nil, fmt.Errorf(
+ "invalid qualified name: %s, wanted name of the form 'qualified.name'", qn)
+ }
+ alias := qn[ind+1:]
+ var err error
+ c, err = aliasAs("abbreviation", qn, alias)(c)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return c, nil
+ }
+}
+
+// Alias associates a fully-qualified name with a user-defined alias.
+//
+// In general, Abbrevs is preferred to Alias since the names generated from the Abbrevs option
+// are more easily traced back to source code. The Alias option is useful for propagating alias
+// configuration from one Container instance to another, and may also be useful for remapping
+// poorly chosen protobuf message / package names.
+//
+// Note: all of the rules that apply to Abbrevs also apply to Alias.
+func Alias(qualifiedName, alias string) ContainerOption {
+ return aliasAs("alias", qualifiedName, alias)
+}
+
+func aliasAs(kind, qualifiedName, alias string) ContainerOption {
+ return func(c *Container) (*Container, error) {
+ if len(alias) == 0 || strings.Contains(alias, ".") {
+ return nil, fmt.Errorf(
+ "%s must be non-empty and simple (not qualified): %s=%s", kind, kind, alias)
+ }
+
+ // Use strings.HasPrefix to avoid indexing into an empty qualified name.
+ if strings.HasPrefix(qualifiedName, ".") {
+ return nil, fmt.Errorf("qualified name must not begin with a leading '.': %s",
+ qualifiedName)
+ }
+ ind := strings.LastIndex(qualifiedName, ".")
+ if ind <= 0 || ind == len(qualifiedName)-1 {
+ return nil, fmt.Errorf("%s must refer to a valid qualified name: %s",
+ kind, qualifiedName)
+ }
+ aliasRef, found := c.aliasSet()[alias]
+ if found {
+ return nil, fmt.Errorf(
+ "%s collides with existing reference: name=%s, %s=%s, existing=%s",
+ kind, qualifiedName, kind, alias, aliasRef)
+ }
+ if strings.HasPrefix(c.Name(), alias+".") || c.Name() == alias {
+ return nil, fmt.Errorf(
+ "%s collides with container name: name=%s, %s=%s, container=%s",
+ kind, qualifiedName, kind, alias, c.Name())
+ }
+ if c == nil {
+ c = &Container{}
+ }
+ if c.aliases == nil {
+ c.aliases = make(map[string]string)
+ }
+ c.aliases[alias] = qualifiedName
+ return c, nil
+ }
+}
+
+func isIdentifierChar(r rune) bool {
+ return r <= unicode.MaxASCII && (r == '.' || r == '_' || unicode.IsLetter(r) || unicode.IsNumber(r))
+}
+
+// Name sets the fully-qualified name of the Container.
+func Name(name string) ContainerOption {
+ return func(c *Container) (*Container, error) {
+ if len(name) > 0 && name[0:1] == "." {
+ return nil, fmt.Errorf("container name must not contain a leading '.': %s", name)
+ }
+ if c.Name() == name {
+ return c, nil
+ }
+ if c == nil {
+ return &Container{name: name}, nil
+ }
+ c.name = name
+ return c, nil
+ }
+}
+
+// ToQualifiedName converts an expression AST into a qualified name if possible, with a boolean
+// 'found' value that indicates if the conversion is successful.
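+//
+// For example, a select chain representing `a.b.c` yields ("a.b.c", true),
+// while a call expression such as `f(x)` yields ("", false).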
+func ToQualifiedName(e ast.Expr) (string, bool) {
+ switch e.Kind() {
+ case ast.IdentKind:
+ id := e.AsIdent()
+ return id, true
+ case ast.SelectKind:
+ sel := e.AsSelect()
+ // Test only expressions are not valid as qualified names.
+ if sel.IsTestOnly() {
+ return "", false
+ }
+ if qual, found := ToQualifiedName(sel.Operand()); found {
+ return qual + "." + sel.FieldName(), true
+ }
+ }
+ return "", false
+}
diff --git a/vendor/github.com/google/cel-go/common/cost.go b/vendor/github.com/google/cel-go/common/cost.go
new file mode 100644
index 000000000..5e24bd0f4
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/cost.go
@@ -0,0 +1,40 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+const (
+ // SelectAndIdentCost is the cost of an operation that accesses an identifier or performs a select.
+ SelectAndIdentCost = 1
+
+ // ConstCost is the cost of an operation that accesses a constant.
+ ConstCost = 0
+
+ // ListCreateBaseCost is the base cost of any operation that creates a new list.
+ ListCreateBaseCost = 10
+
+ // MapCreateBaseCost is the base cost of any operation that creates a new map.
+ MapCreateBaseCost = 30
+
+ // StructCreateBaseCost is the base cost of any operation that creates a new struct.
+ StructCreateBaseCost = 40
+
+ // StringTraversalCostFactor is multiplied by the length of a string when computing the cost of traversing the entire
+ // string once.
+ StringTraversalCostFactor = 0.1
+
+ // RegexStringLengthCostFactor is multiplied by the length of a regex string pattern when computing the cost of
+ // applying the regex to a string of unit cost.
+ RegexStringLengthCostFactor = 0.25
+)
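+
+// Worked example of how the factors combine: traversing a 100-character string
+// once costs 100 * StringTraversalCostFactor = 10 units, while applying a
+// 20-character regex pattern to a string of unit cost adds
+// 20 * RegexStringLengthCostFactor = 5 units.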
diff --git a/vendor/github.com/google/cel-go/common/debug/BUILD.bazel b/vendor/github.com/google/cel-go/common/debug/BUILD.bazel
new file mode 100644
index 000000000..724ed3404
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/debug/BUILD.bazel
@@ -0,0 +1,20 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+package(
+ default_visibility = ["//visibility:public"],
+ licenses = ["notice"], # Apache 2.0
+)
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "debug.go",
+ ],
+ importpath = "github.com/google/cel-go/common/debug",
+ deps = [
+ "//common:go_default_library",
+ "//common/ast:go_default_library",
+ "//common/types:go_default_library",
+ "//common/types/ref:go_default_library",
+ ],
+)
diff --git a/vendor/github.com/google/cel-go/common/debug/debug.go b/vendor/github.com/google/cel-go/common/debug/debug.go
new file mode 100644
index 000000000..25d2e3d71
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/debug/debug.go
@@ -0,0 +1,314 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package debug provides tools to print a parsed expression graph and
+// adorn each expression element with additional metadata.
+package debug
+
+import (
+ "bytes"
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/google/cel-go/common/ast"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+)
+
+// Adorner returns debug metadata that will be tacked on to the string
+// representation of an expression.
+type Adorner interface {
+ // GetMetadata for the input context.
+ GetMetadata(ctx any) string
+}
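+
+// An illustrative Adorner sketch that tags each expression with its node ID,
+// assuming the ctx values passed to GetMetadata are ast.Expr nodes (the type
+// name idAdorner is hypothetical):
+//
+//	type idAdorner struct{}
+//
+//	func (idAdorner) GetMetadata(ctx any) string {
+//		if e, ok := ctx.(ast.Expr); ok {
+//			return fmt.Sprintf("^#%d", e.ID())
+//		}
+//		return ""
+//	}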
+
+// Writer manages writing expressions to an internal string.
+type Writer interface {
+ fmt.Stringer
+
+ // Buffer pushes an expression into an internal queue of expressions to
+ // write to a string.
+ Buffer(e ast.Expr)
+}
+
+type emptyDebugAdorner struct {
+}
+
+var emptyAdorner Adorner = &emptyDebugAdorner{}
+
+func (a *emptyDebugAdorner) GetMetadata(e any) string {
+ return ""
+}
+
+// ToDebugString gives the unadorned string representation of the Expr.
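+//
+// For example, a parsed `1 + 2` renders as a call tree along the lines of
+// _+_(1, 2), with each argument printed on its own indented line.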
+func ToDebugString(e ast.Expr) string {
+ return ToAdornedDebugString(e, emptyAdorner)
+}
+
+// ToAdornedDebugString gives the adorned string representation of the Expr.
+func ToAdornedDebugString(e ast.Expr, adorner Adorner) string {
+ w := newDebugWriter(adorner)
+ w.Buffer(e)
+ return w.String()
+}
+
+// debugWriter is used to print out pretty-printed debug strings.
+type debugWriter struct {
+ adorner Adorner
+ buffer bytes.Buffer
+ indent int
+ lineStart bool
+}
+
+func newDebugWriter(a Adorner) *debugWriter {
+ return &debugWriter{
+ adorner: a,
+ indent: 0,
+ lineStart: true,
+ }
+}
+
+func (w *debugWriter) Buffer(e ast.Expr) {
+ if e == nil {
+ return
+ }
+ switch e.Kind() {
+ case ast.LiteralKind:
+ w.append(formatLiteral(e.AsLiteral()))
+ case ast.IdentKind:
+ w.append(e.AsIdent())
+ case ast.SelectKind:
+ w.appendSelect(e.AsSelect())
+ case ast.CallKind:
+ w.appendCall(e.AsCall())
+ case ast.ListKind:
+ w.appendList(e.AsList())
+ case ast.MapKind:
+ w.appendMap(e.AsMap())
+ case ast.StructKind:
+ w.appendStruct(e.AsStruct())
+ case ast.ComprehensionKind:
+ w.appendComprehension(e.AsComprehension())
+ }
+ w.adorn(e)
+}
+
+func (w *debugWriter) appendSelect(sel ast.SelectExpr) {
+ w.Buffer(sel.Operand())
+ w.append(".")
+ w.append(sel.FieldName())
+ if sel.IsTestOnly() {
+ w.append("~test-only~")
+ }
+}
+
+func (w *debugWriter) appendCall(call ast.CallExpr) {
+ if call.IsMemberFunction() {
+ w.Buffer(call.Target())
+ w.append(".")
+ }
+ w.append(call.FunctionName())
+ w.append("(")
+ if len(call.Args()) > 0 {
+ w.addIndent()
+ w.appendLine()
+ for i, arg := range call.Args() {
+ if i > 0 {
+ w.append(",")
+ w.appendLine()
+ }
+ w.Buffer(arg)
+ }
+ w.removeIndent()
+ w.appendLine()
+ }
+ w.append(")")
+}
+
+func (w *debugWriter) appendList(list ast.ListExpr) {
+ w.append("[")
+ if len(list.Elements()) > 0 {
+ w.appendLine()
+ w.addIndent()
+ for i, elem := range list.Elements() {
+ if i > 0 {
+ w.append(",")
+ w.appendLine()
+ }
+ w.Buffer(elem)
+ }
+ w.removeIndent()
+ w.appendLine()
+ }
+ w.append("]")
+}
+
+func (w *debugWriter) appendStruct(obj ast.StructExpr) {
+ w.append(obj.TypeName())
+ w.append("{")
+ if len(obj.Fields()) > 0 {
+ w.appendLine()
+ w.addIndent()
+ for i, f := range obj.Fields() {
+ field := f.AsStructField()
+ if i > 0 {
+ w.append(",")
+ w.appendLine()
+ }
+ if field.IsOptional() {
+ w.append("?")
+ }
+ w.append(field.Name())
+ w.append(":")
+ w.Buffer(field.Value())
+ w.adorn(f)
+ }
+ w.removeIndent()
+ w.appendLine()
+ }
+ w.append("}")
+}
+
+func (w *debugWriter) appendMap(m ast.MapExpr) {
+ w.append("{")
+ if m.Size() > 0 {
+ w.appendLine()
+ w.addIndent()
+ for i, e := range m.Entries() {
+ entry := e.AsMapEntry()
+ if i > 0 {
+ w.append(",")
+ w.appendLine()
+ }
+ if entry.IsOptional() {
+ w.append("?")
+ }
+ w.Buffer(entry.Key())
+ w.append(":")
+ w.Buffer(entry.Value())
+ w.adorn(e)
+ }
+ w.removeIndent()
+ w.appendLine()
+ }
+ w.append("}")
+}
+
+func (w *debugWriter) appendComprehension(comprehension ast.ComprehensionExpr) {
+ w.append("__comprehension__(")
+ w.addIndent()
+ w.appendLine()
+ w.append("// Variable")
+ w.appendLine()
+ w.append(comprehension.IterVar())
+ w.append(",")
+ w.appendLine()
+ if comprehension.HasIterVar2() {
+ w.append(comprehension.IterVar2())
+ w.append(",")
+ w.appendLine()
+ }
+ w.append("// Target")
+ w.appendLine()
+ w.Buffer(comprehension.IterRange())
+ w.append(",")
+ w.appendLine()
+ w.append("// Accumulator")
+ w.appendLine()
+ w.append(comprehension.AccuVar())
+ w.append(",")
+ w.appendLine()
+ w.append("// Init")
+ w.appendLine()
+ w.Buffer(comprehension.AccuInit())
+ w.append(",")
+ w.appendLine()
+ w.append("// LoopCondition")
+ w.appendLine()
+ w.Buffer(comprehension.LoopCondition())
+ w.append(",")
+ w.appendLine()
+ w.append("// LoopStep")
+ w.appendLine()
+ w.Buffer(comprehension.LoopStep())
+ w.append(",")
+ w.appendLine()
+ w.append("// Result")
+ w.appendLine()
+ w.Buffer(comprehension.Result())
+ w.append(")")
+ w.removeIndent()
+}
+
+func formatLiteral(c ref.Val) string {
+ switch v := c.(type) {
+ case types.Bool:
+ return fmt.Sprintf("%t", v)
+ case types.Bytes:
+ return fmt.Sprintf("b\"%s\"", string(v))
+ case types.Double:
+ return fmt.Sprintf("%v", float64(v))
+ case types.Int:
+ return fmt.Sprintf("%d", int64(v))
+ case types.String:
+ return strconv.Quote(string(v))
+ case types.Uint:
+ return fmt.Sprintf("%du", uint64(v))
+ case types.Null:
+ return "null"
+ default:
+ panic("Unknown constant type")
+ }
+}
+
+func (w *debugWriter) append(s string) {
+ w.doIndent()
+ w.buffer.WriteString(s)
+}
+
+func (w *debugWriter) appendFormat(f string, args ...any) {
+ w.append(fmt.Sprintf(f, args...))
+}
+
+func (w *debugWriter) doIndent() {
+ if w.lineStart {
+ w.lineStart = false
+ w.buffer.WriteString(strings.Repeat(" ", w.indent))
+ }
+}
+
+func (w *debugWriter) adorn(e any) {
+ w.append(w.adorner.GetMetadata(e))
+}
+
+func (w *debugWriter) appendLine() {
+ w.buffer.WriteString("\n")
+ w.lineStart = true
+}
+
+func (w *debugWriter) addIndent() {
+ w.indent++
+}
+
+func (w *debugWriter) removeIndent() {
+ w.indent--
+ if w.indent < 0 {
+ panic("negative indent")
+ }
+}
+
+func (w *debugWriter) String() string {
+ return w.buffer.String()
+}
diff --git a/vendor/github.com/google/cel-go/common/decls/BUILD.bazel b/vendor/github.com/google/cel-go/common/decls/BUILD.bazel
new file mode 100644
index 000000000..17791dce6
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/decls/BUILD.bazel
@@ -0,0 +1,39 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(
+ default_visibility = ["//visibility:public"],
+ licenses = ["notice"], # Apache 2.0
+)
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "decls.go",
+ ],
+ importpath = "github.com/google/cel-go/common/decls",
+ deps = [
+ "//checker/decls:go_default_library",
+ "//common/functions:go_default_library",
+ "//common/types:go_default_library",
+ "//common/types/ref:go_default_library",
+ "//common/types/traits:go_default_library",
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
+ ],
+)
+
+go_test(
+ name = "go_default_test",
+ srcs = [
+ "decls_test.go",
+ ],
+ embed = [":go_default_library"],
+ deps = [
+ "//checker/decls:go_default_library",
+ "//common/overloads:go_default_library",
+ "//common/types:go_default_library",
+ "//common/types/ref:go_default_library",
+ "//common/types/traits:go_default_library",
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
+ "@org_golang_google_protobuf//proto:go_default_library",
+ ],
+)
diff --git a/vendor/github.com/google/cel-go/common/decls/decls.go b/vendor/github.com/google/cel-go/common/decls/decls.go
new file mode 100644
index 000000000..f67808feb
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/decls/decls.go
@@ -0,0 +1,846 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package decls contains function and variable declaration structs and helper methods.
+package decls
+
+import (
+ "fmt"
+ "strings"
+
+ chkdecls "github.com/google/cel-go/checker/decls"
+ "github.com/google/cel-go/common/functions"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+// NewFunction creates a new function declaration with a set of function options to configure overloads
+// and function definitions (implementations).
+//
+// Functions are checked for name collisions and singleton redefinition.
+func NewFunction(name string, opts ...FunctionOpt) (*FunctionDecl, error) {
+ fn := &FunctionDecl{
+ name: name,
+ overloads: map[string]*OverloadDecl{},
+ overloadOrdinals: []string{},
+ }
+ var err error
+ for _, opt := range opts {
+ fn, err = opt(fn)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if len(fn.overloads) == 0 {
+ return nil, fmt.Errorf("function %s must have at least one overload", name)
+ }
+ return fn, nil
+}
+
+// FunctionDecl defines a function name, overload set, and optionally a singleton definition for all
+// overload instances.
+type FunctionDecl struct {
+ name string
+
+ // overloads associated with the function name.
+ overloads map[string]*OverloadDecl
+
+ // singleton implementation of the function for all overloads.
+ //
+ // If this option is set, an error will occur if any overloads specify a per-overload implementation
+ // or if another function with the same name attempts to redefine the singleton.
+ singleton *functions.Overload
+
+ // disableTypeGuards is a performance optimization to disable detailed runtime type checks which could
+ // add overhead on common operations. Setting this option true leaves error checks and argument checks
+ // intact.
+ disableTypeGuards bool
+
+ // state indicates that the binding should be provided as a declaration, as a runtime binding, or both.
+ state declarationState
+
+	// overloadOrdinals indicates the order in which the overloads were declared.
+ overloadOrdinals []string
+}
+
+type declarationState int
+
+const (
+ declarationStateUnset declarationState = iota
+ declarationDisabled
+ declarationEnabled
+)
+
+// Name returns the function name in human-readable terms, e.g. 'contains' or 'math.least'
+func (f *FunctionDecl) Name() string {
+ if f == nil {
+ return ""
+ }
+ return f.name
+}
+
+// IsDeclarationDisabled indicates that the function implementation should be added to the dispatcher, but the
+// declaration should not be exposed for use in expressions.
+func (f *FunctionDecl) IsDeclarationDisabled() bool {
+ return f.state == declarationDisabled
+}
+
+// Merge combines an existing function declaration with another.
+//
+// If a function is extended, say by adding new overloads to an existing function, then it is merged with the
+// prior definition of the function, at which point its overloads must not collide with pre-existing overloads
+// and its bindings (singleton, or per-overload) must not conflict with previous definitions either.
+func (f *FunctionDecl) Merge(other *FunctionDecl) (*FunctionDecl, error) {
+ if f == other {
+ return f, nil
+ }
+ if f.Name() != other.Name() {
+ return nil, fmt.Errorf("cannot merge unrelated functions. %s and %s", f.Name(), other.Name())
+ }
+ merged := &FunctionDecl{
+ name: f.Name(),
+ overloads: make(map[string]*OverloadDecl, len(f.overloads)),
+ singleton: f.singleton,
+ overloadOrdinals: make([]string, len(f.overloads)),
+ // if one function is expecting type-guards and the other is not, then they
+ // must not be disabled.
+ disableTypeGuards: f.disableTypeGuards && other.disableTypeGuards,
+ // default to the current functions declaration state.
+ state: f.state,
+ }
+ // If the other state indicates that the declaration should be explicitly enabled or
+ // disabled, then update the merged state with the most recent value.
+ if other.state != declarationStateUnset {
+ merged.state = other.state
+ }
+ // baseline copy of the overloads and their ordinals
+ copy(merged.overloadOrdinals, f.overloadOrdinals)
+ for oID, o := range f.overloads {
+ merged.overloads[oID] = o
+ }
+ // overloads and their ordinals are added from the left
+ for _, oID := range other.overloadOrdinals {
+ o := other.overloads[oID]
+ err := merged.AddOverload(o)
+ if err != nil {
+ return nil, fmt.Errorf("function declaration merge failed: %v", err)
+ }
+ }
+ if other.singleton != nil {
+ if merged.singleton != nil && merged.singleton != other.singleton {
+ return nil, fmt.Errorf("function already has a singleton binding: %s", f.Name())
+ }
+ merged.singleton = other.singleton
+ }
+ return merged, nil
+}
+
+// AddOverload ensures that the new overload does not collide with an existing overload signature;
+// however, if the function signatures are identical, the implementation may be rewritten, as it is
+// difficult to compare functions by object identity.
+func (f *FunctionDecl) AddOverload(overload *OverloadDecl) error {
+ if f == nil {
+ return fmt.Errorf("nil function cannot add overload: %s", overload.ID())
+ }
+ for oID, o := range f.overloads {
+ if oID != overload.ID() && o.SignatureOverlaps(overload) {
+ return fmt.Errorf("overload signature collision in function %s: %s collides with %s", f.Name(), oID, overload.ID())
+ }
+ if oID == overload.ID() {
+ if o.SignatureEquals(overload) && o.IsNonStrict() == overload.IsNonStrict() {
+ // Allow redefinition of an overload implementation so long as the signatures match.
+ if overload.hasBinding() {
+ f.overloads[oID] = overload
+ }
+ return nil
+ }
+ return fmt.Errorf("overload redefinition in function. %s: %s has multiple definitions", f.Name(), oID)
+ }
+ }
+ f.overloadOrdinals = append(f.overloadOrdinals, overload.ID())
+ f.overloads[overload.ID()] = overload
+ return nil
+}
+
+// OverloadDecls returns the overload declarations in the order in which they were declared.
+func (f *FunctionDecl) OverloadDecls() []*OverloadDecl {
+ if f == nil {
+ return []*OverloadDecl{}
+ }
+ overloads := make([]*OverloadDecl, 0, len(f.overloads))
+ for _, oID := range f.overloadOrdinals {
+ overloads = append(overloads, f.overloads[oID])
+ }
+ return overloads
+}
+
+// Bindings produces a set of function bindings, if any are defined.
+func (f *FunctionDecl) Bindings() ([]*functions.Overload, error) {
+ if f == nil {
+ return []*functions.Overload{}, nil
+ }
+ overloads := []*functions.Overload{}
+ nonStrict := false
+ for _, oID := range f.overloadOrdinals {
+ o := f.overloads[oID]
+ if o.hasBinding() {
+ overload := &functions.Overload{
+ Operator: o.ID(),
+ Unary: o.guardedUnaryOp(f.Name(), f.disableTypeGuards),
+ Binary: o.guardedBinaryOp(f.Name(), f.disableTypeGuards),
+ Function: o.guardedFunctionOp(f.Name(), f.disableTypeGuards),
+ OperandTrait: o.OperandTrait(),
+ NonStrict: o.IsNonStrict(),
+ }
+ overloads = append(overloads, overload)
+ nonStrict = nonStrict || o.IsNonStrict()
+ }
+ }
+ if f.singleton != nil {
+ if len(overloads) != 0 {
+ return nil, fmt.Errorf("singleton function incompatible with specialized overloads: %s", f.Name())
+ }
+ overloads = []*functions.Overload{
+ {
+ Operator: f.Name(),
+ Unary: f.singleton.Unary,
+ Binary: f.singleton.Binary,
+ Function: f.singleton.Function,
+ OperandTrait: f.singleton.OperandTrait,
+ },
+ }
+ // fall-through to return single overload case.
+ }
+ if len(overloads) == 0 {
+ return overloads, nil
+ }
+ // Single overload. Replicate an entry for it using the function name as well.
+ if len(overloads) == 1 {
+ if overloads[0].Operator == f.Name() {
+ return overloads, nil
+ }
+ return append(overloads, &functions.Overload{
+ Operator: f.Name(),
+ Unary: overloads[0].Unary,
+ Binary: overloads[0].Binary,
+ Function: overloads[0].Function,
+ NonStrict: overloads[0].NonStrict,
+ OperandTrait: overloads[0].OperandTrait,
+ }), nil
+ }
+ // All of the defined overloads are wrapped into a top-level function which
+ // performs dynamic dispatch to the proper overload based on the argument types.
+ bindings := append([]*functions.Overload{}, overloads...)
+ funcDispatch := func(args ...ref.Val) ref.Val {
+ for _, oID := range f.overloadOrdinals {
+ o := f.overloads[oID]
+ // During dynamic dispatch over multiple functions, signature agreement checks
+ // are preserved in order to assist with the function resolution step.
+ switch len(args) {
+ case 1:
+ if o.unaryOp != nil && o.matchesRuntimeSignature(f.disableTypeGuards, args...) {
+ return o.unaryOp(args[0])
+ }
+ case 2:
+ if o.binaryOp != nil && o.matchesRuntimeSignature(f.disableTypeGuards, args...) {
+ return o.binaryOp(args[0], args[1])
+ }
+ }
+ if o.functionOp != nil && o.matchesRuntimeSignature(f.disableTypeGuards, args...) {
+ return o.functionOp(args...)
+ }
+ // eventually this will fall through to the noSuchOverload below.
+ }
+ return MaybeNoSuchOverload(f.Name(), args...)
+ }
+ function := &functions.Overload{
+ Operator: f.Name(),
+ Function: funcDispatch,
+ NonStrict: nonStrict,
+ }
+ return append(bindings, function), nil
+}
+
+// MaybeNoSuchOverload determines whether to propagate an error if one is provided as an argument, or
+// to return an unknown set, or to produce a new error for a missing function signature.
+func MaybeNoSuchOverload(funcName string, args ...ref.Val) ref.Val {
+ argTypes := make([]string, len(args))
+ var unk *types.Unknown = nil
+ for i, arg := range args {
+ if types.IsError(arg) {
+ return arg
+ }
+ if types.IsUnknown(arg) {
+ unk = types.MergeUnknowns(arg.(*types.Unknown), unk)
+ }
+ argTypes[i] = arg.Type().TypeName()
+ }
+ if unk != nil {
+ return unk
+ }
+ signature := strings.Join(argTypes, ", ")
+ return types.NewErr("no such overload: %s(%s)", funcName, signature)
+}
+
+// FunctionOpt defines a functional option for mutating a function declaration.
+type FunctionOpt func(*FunctionDecl) (*FunctionDecl, error)
+
+// DisableTypeGuards disables automatically generated function invocation guards on direct overload calls.
+// Type guards remain on during dynamic dispatch for parsed-only expressions.
+func DisableTypeGuards(value bool) FunctionOpt {
+ return func(fn *FunctionDecl) (*FunctionDecl, error) {
+ fn.disableTypeGuards = value
+ return fn, nil
+ }
+}
+
+// DisableDeclaration indicates that the function declaration should be disabled, but the runtime function
+// binding should be provided. Marking a function as runtime-only is a safe way to manage deprecations
+// of function declarations while still preserving the runtime behavior for previously compiled expressions.
+func DisableDeclaration(value bool) FunctionOpt {
+ return func(fn *FunctionDecl) (*FunctionDecl, error) {
+ if value {
+ fn.state = declarationDisabled
+ } else {
+ fn.state = declarationEnabled
+ }
+ return fn, nil
+ }
+}
+
+// SingletonUnaryBinding creates a singleton function definition to be used for all function overloads.
+//
+// Note, this approach works well if the operand is expected to have a specific trait which it implements,
+// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings.
+func SingletonUnaryBinding(fn functions.UnaryOp, traits ...int) FunctionOpt {
+ trait := 0
+ for _, t := range traits {
+ trait = trait | t
+ }
+ return func(f *FunctionDecl) (*FunctionDecl, error) {
+ if f.singleton != nil {
+ return nil, fmt.Errorf("function already has a singleton binding: %s", f.Name())
+ }
+ f.singleton = &functions.Overload{
+ Operator: f.Name(),
+ Unary: fn,
+ OperandTrait: trait,
+ }
+ return f, nil
+ }
+}
+
+// SingletonBinaryBinding creates a singleton function definition to be used with all function overloads.
+//
+// Note, this approach works well if the operand is expected to have a specific trait which it implements,
+// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings.
+func SingletonBinaryBinding(fn functions.BinaryOp, traits ...int) FunctionOpt {
+ trait := 0
+ for _, t := range traits {
+ trait = trait | t
+ }
+ return func(f *FunctionDecl) (*FunctionDecl, error) {
+ if f.singleton != nil {
+ return nil, fmt.Errorf("function already has a singleton binding: %s", f.Name())
+ }
+ f.singleton = &functions.Overload{
+ Operator: f.Name(),
+ Binary: fn,
+ OperandTrait: trait,
+ }
+ return f, nil
+ }
+}
+
+// SingletonFunctionBinding creates a singleton function definition to be used with all function overloads.
+//
+// Note, this approach works well if the operand is expected to have a specific trait which it implements,
+// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings.
+func SingletonFunctionBinding(fn functions.FunctionOp, traits ...int) FunctionOpt {
+ trait := 0
+ for _, t := range traits {
+ trait = trait | t
+ }
+ return func(f *FunctionDecl) (*FunctionDecl, error) {
+ if f.singleton != nil {
+ return nil, fmt.Errorf("function already has a singleton binding: %s", f.Name())
+ }
+ f.singleton = &functions.Overload{
+ Operator: f.Name(),
+ Function: fn,
+ OperandTrait: trait,
+ }
+ return f, nil
+ }
+}
+
+// Overload defines a new global overload with an overload id, argument types, and result type. Through the
+// use of OverloadOpt options, the overload may also be configured with a binding, an operand trait, and to
+// be non-strict.
+//
+// Note: function bindings should be commonly configured with Overload instances whereas operand traits and
+// strict-ness should be rare occurrences.
+func Overload(overloadID string,
+ args []*types.Type, resultType *types.Type,
+ opts ...OverloadOpt) FunctionOpt {
+ return newOverload(overloadID, false, args, resultType, opts...)
+}
+
+// MemberOverload defines a new receiver-style overload (or member function) with an overload id, argument types,
+// and result type. Through the use of OverloadOpt options, the overload may also be configured with a binding,
+// an operand trait, and to be non-strict.
+//
+// Note: function bindings should be commonly configured with Overload instances whereas operand traits and
+// strict-ness should be rare occurrences.
+func MemberOverload(overloadID string,
+ args []*types.Type, resultType *types.Type,
+ opts ...OverloadOpt) FunctionOpt {
+ return newOverload(overloadID, true, args, resultType, opts...)
+}
+
+func newOverload(overloadID string,
+ memberFunction bool, args []*types.Type, resultType *types.Type,
+ opts ...OverloadOpt) FunctionOpt {
+ return func(f *FunctionDecl) (*FunctionDecl, error) {
+ overload, err := newOverloadInternal(overloadID, memberFunction, args, resultType, opts...)
+ if err != nil {
+ return nil, err
+ }
+ err = f.AddOverload(overload)
+ if err != nil {
+ return nil, err
+ }
+ return f, nil
+ }
+}
+
+func newOverloadInternal(overloadID string,
+ memberFunction bool, args []*types.Type, resultType *types.Type,
+ opts ...OverloadOpt) (*OverloadDecl, error) {
+ overload := &OverloadDecl{
+ id: overloadID,
+ argTypes: args,
+ resultType: resultType,
+ isMemberFunction: memberFunction,
+ }
+ var err error
+ for _, opt := range opts {
+ overload, err = opt(overload)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return overload, nil
+}
+
+// OverloadDecl contains the definition of a single overload id with a specific signature, and an optional
+// implementation.
+type OverloadDecl struct {
+ id string
+ argTypes []*types.Type
+ resultType *types.Type
+ isMemberFunction bool
+ // nonStrict indicates that the function will accept error and unknown arguments as inputs.
+ nonStrict bool
+ // operandTrait indicates whether the member argument should have a specific type-trait.
+ //
+ // This is useful for creating overloads which operate on a type-interface rather than a concrete type.
+ operandTrait int
+
+ // Function implementation options. Optional, but encouraged.
+ // unaryOp is a function binding that takes a single argument.
+ unaryOp functions.UnaryOp
+ // binaryOp is a function binding that takes two arguments.
+ binaryOp functions.BinaryOp
+ // functionOp is a catch-all for zero-arity and three-plus arity functions.
+ functionOp functions.FunctionOp
+}
+
+// ID mirrors the overload signature and provides a unique id which may be referenced within the type-checker
+// and interpreter to optimize performance.
+//
+// The ID format is usually one of two styles:
+// global: <functionName>_<argType>_<argTypeN>
+// member: <memberType>_<functionName>_<argType>_<argTypeN>
+func (o *OverloadDecl) ID() string {
+ if o == nil {
+ return ""
+ }
+ return o.id
+}
+
+// ArgTypes contains the set of argument types expected by the overload.
+//
+// For member functions ArgTypes[0] represents the member operand type.
+func (o *OverloadDecl) ArgTypes() []*types.Type {
+ if o == nil {
+ return emptyArgs
+ }
+ return o.argTypes
+}
+
+// IsMemberFunction indicates whether the overload is a member function.
+func (o *OverloadDecl) IsMemberFunction() bool {
+ if o == nil {
+ return false
+ }
+ return o.isMemberFunction
+}
+
+// IsNonStrict returns whether the overload accepts errors and unknown values as arguments.
+func (o *OverloadDecl) IsNonStrict() bool {
+ if o == nil {
+ return false
+ }
+ return o.nonStrict
+}
+
+// OperandTrait returns the trait mask of the first operand to the overload call, e.g.
+// `traits.Indexer`
+func (o *OverloadDecl) OperandTrait() int {
+ if o == nil {
+ return 0
+ }
+ return o.operandTrait
+}
+
+// ResultType indicates the output type from calling the function.
+func (o *OverloadDecl) ResultType() *types.Type {
+ if o == nil {
+ // *types.Type is nil-safe
+ return nil
+ }
+ return o.resultType
+}
+
+// TypeParams returns the type parameter names associated with the overload.
+func (o *OverloadDecl) TypeParams() []string {
+ typeParams := map[string]struct{}{}
+ collectParamNames(typeParams, o.ResultType())
+ for _, arg := range o.ArgTypes() {
+ collectParamNames(typeParams, arg)
+ }
+ params := make([]string, 0, len(typeParams))
+ for param := range typeParams {
+ params = append(params, param)
+ }
+ return params
+}
+
+// SignatureEquals determines whether the incoming overload declaration signature is equal to the current signature.
+//
+// Result type, operand trait, and strict-ness are not considered as part of signature equality.
+func (o *OverloadDecl) SignatureEquals(other *OverloadDecl) bool {
+ if o == other {
+ return true
+ }
+ if o.ID() != other.ID() || o.IsMemberFunction() != other.IsMemberFunction() || len(o.ArgTypes()) != len(other.ArgTypes()) {
+ return false
+ }
+ for i, at := range o.ArgTypes() {
+ oat := other.ArgTypes()[i]
+ if !at.IsEquivalentType(oat) {
+ return false
+ }
+ }
+ return o.ResultType().IsEquivalentType(other.ResultType())
+}
+
+// SignatureOverlaps indicates whether two functions have non-equal, but overlapping function signatures.
+//
+// For example, list(dyn) collides with list(string) since the 'dyn' type can contain a 'string' type.
+func (o *OverloadDecl) SignatureOverlaps(other *OverloadDecl) bool {
+ if o.IsMemberFunction() != other.IsMemberFunction() || len(o.ArgTypes()) != len(other.ArgTypes()) {
+ return false
+ }
+ argsOverlap := true
+ for i, argType := range o.ArgTypes() {
+ otherArgType := other.ArgTypes()[i]
+ argsOverlap = argsOverlap &&
+ (argType.IsAssignableType(otherArgType) ||
+ otherArgType.IsAssignableType(argType))
+ }
+ return argsOverlap
+}
+
+// hasBinding indicates whether the overload already has a definition.
+func (o *OverloadDecl) hasBinding() bool {
+ return o != nil && (o.unaryOp != nil || o.binaryOp != nil || o.functionOp != nil)
+}
+
+// guardedUnaryOp creates an invocation guard around the provided unary operator, if one is defined.
+func (o *OverloadDecl) guardedUnaryOp(funcName string, disableTypeGuards bool) functions.UnaryOp {
+ if o.unaryOp == nil {
+ return nil
+ }
+ return func(arg ref.Val) ref.Val {
+ if !o.matchesRuntimeUnarySignature(disableTypeGuards, arg) {
+ return MaybeNoSuchOverload(funcName, arg)
+ }
+ return o.unaryOp(arg)
+ }
+}
+
+// guardedBinaryOp creates an invocation guard around the provided binary operator, if one is defined.
+func (o *OverloadDecl) guardedBinaryOp(funcName string, disableTypeGuards bool) functions.BinaryOp {
+ if o.binaryOp == nil {
+ return nil
+ }
+ return func(arg1, arg2 ref.Val) ref.Val {
+ if !o.matchesRuntimeBinarySignature(disableTypeGuards, arg1, arg2) {
+ return MaybeNoSuchOverload(funcName, arg1, arg2)
+ }
+ return o.binaryOp(arg1, arg2)
+ }
+}
+
+// guardedFunctionOp creates an invocation guard around the provided variadic function binding, if one is provided.
+func (o *OverloadDecl) guardedFunctionOp(funcName string, disableTypeGuards bool) functions.FunctionOp {
+ if o.functionOp == nil {
+ return nil
+ }
+ return func(args ...ref.Val) ref.Val {
+ if !o.matchesRuntimeSignature(disableTypeGuards, args...) {
+ return MaybeNoSuchOverload(funcName, args...)
+ }
+ return o.functionOp(args...)
+ }
+}
+
+// matchesRuntimeUnarySignature indicates whether the argument type is runtime assignable to the overload's expected argument.
+func (o *OverloadDecl) matchesRuntimeUnarySignature(disableTypeGuards bool, arg ref.Val) bool {
+ return matchRuntimeArgType(o.IsNonStrict(), disableTypeGuards, o.ArgTypes()[0], arg) &&
+ matchOperandTrait(o.OperandTrait(), arg)
+}
+
+// matchesRuntimeBinarySignature indicates whether the argument types are runtime assignable to the overload's expected arguments.
+func (o *OverloadDecl) matchesRuntimeBinarySignature(disableTypeGuards bool, arg1, arg2 ref.Val) bool {
+ return matchRuntimeArgType(o.IsNonStrict(), disableTypeGuards, o.ArgTypes()[0], arg1) &&
+ matchRuntimeArgType(o.IsNonStrict(), disableTypeGuards, o.ArgTypes()[1], arg2) &&
+ matchOperandTrait(o.OperandTrait(), arg1)
+}
+
+// matchesRuntimeSignature indicates whether the argument types are runtime assignable to the overload's expected arguments.
+func (o *OverloadDecl) matchesRuntimeSignature(disableTypeGuards bool, args ...ref.Val) bool {
+ if len(args) != len(o.ArgTypes()) {
+ return false
+ }
+ if len(args) == 0 {
+ return true
+ }
+ for i, arg := range args {
+ if !matchRuntimeArgType(o.IsNonStrict(), disableTypeGuards, o.ArgTypes()[i], arg) {
+ return false
+ }
+ }
+ return matchOperandTrait(o.OperandTrait(), args[0])
+}
+
+func matchRuntimeArgType(nonStrict, disableTypeGuards bool, argType *types.Type, arg ref.Val) bool {
+ if nonStrict && (disableTypeGuards || types.IsUnknownOrError(arg)) {
+ return true
+ }
+ if types.IsUnknownOrError(arg) {
+ return false
+ }
+ return disableTypeGuards || argType.IsAssignableRuntimeType(arg)
+}
+
+func matchOperandTrait(trait int, arg ref.Val) bool {
+ return trait == 0 || arg.Type().HasTrait(trait) || types.IsUnknownOrError(arg)
+}
+
+// OverloadOpt is a functional option for configuring a function overload.
+type OverloadOpt func(*OverloadDecl) (*OverloadDecl, error)
+
+// UnaryBinding provides the implementation of a unary overload. The provided function is protected by a runtime
+// type-guard which ensures runtime type agreement between the overload signature and runtime argument types.
+func UnaryBinding(binding functions.UnaryOp) OverloadOpt {
+ return func(o *OverloadDecl) (*OverloadDecl, error) {
+ if o.hasBinding() {
+ return nil, fmt.Errorf("overload already has a binding: %s", o.ID())
+ }
+ if len(o.ArgTypes()) != 1 {
+ return nil, fmt.Errorf("unary function bound to non-unary overload: %s", o.ID())
+ }
+ o.unaryOp = binding
+ return o, nil
+ }
+}
+
+// BinaryBinding provides the implementation of a binary overload. The provided function is protected by a runtime
+// type-guard which ensures runtime type agreement between the overload signature and runtime argument types.
+func BinaryBinding(binding functions.BinaryOp) OverloadOpt {
+ return func(o *OverloadDecl) (*OverloadDecl, error) {
+ if o.hasBinding() {
+ return nil, fmt.Errorf("overload already has a binding: %s", o.ID())
+ }
+ if len(o.ArgTypes()) != 2 {
+ return nil, fmt.Errorf("binary function bound to non-binary overload: %s", o.ID())
+ }
+ o.binaryOp = binding
+ return o, nil
+ }
+}
+
+// FunctionBinding provides the implementation of a variadic overload. The provided function is protected by a runtime
+// type-guard which ensures runtime type agreement between the overload signature and runtime argument types.
+func FunctionBinding(binding functions.FunctionOp) OverloadOpt {
+ return func(o *OverloadDecl) (*OverloadDecl, error) {
+ if o.hasBinding() {
+ return nil, fmt.Errorf("overload already has a binding: %s", o.ID())
+ }
+ o.functionOp = binding
+ return o, nil
+ }
+}
+
+// OverloadIsNonStrict enables the function to be called with error and unknown argument values.
+//
+// Note: do not use this option unless absolutely necessary, as it should be an uncommon feature.
+func OverloadIsNonStrict() OverloadOpt {
+ return func(o *OverloadDecl) (*OverloadDecl, error) {
+ o.nonStrict = true
+ return o, nil
+ }
+}
+
+// OverloadOperandTrait configures a set of traits which the first argument to the overload must implement in order to be
+// successfully invoked.
+func OverloadOperandTrait(trait int) OverloadOpt {
+ return func(o *OverloadDecl) (*OverloadDecl, error) {
+ o.operandTrait = trait
+ return o, nil
+ }
+}
+
+// NewConstant creates a new constant declaration.
+func NewConstant(name string, t *types.Type, v ref.Val) *VariableDecl {
+ return &VariableDecl{name: name, varType: t, value: v}
+}
+
+// NewVariable creates a new variable declaration.
+func NewVariable(name string, t *types.Type) *VariableDecl {
+ return &VariableDecl{name: name, varType: t}
+}
+
+// VariableDecl defines a variable declaration which may optionally have a constant value.
+type VariableDecl struct {
+ name string
+ varType *types.Type
+ value ref.Val
+}
+
+// Name returns the fully-qualified variable name.
+func (v *VariableDecl) Name() string {
+ if v == nil {
+ return ""
+ }
+ return v.name
+}
+
+// Type returns the types.Type value associated with the variable.
+func (v *VariableDecl) Type() *types.Type {
+ if v == nil {
+ // types.Type is nil-safe
+ return nil
+ }
+ return v.varType
+}
+
+// Value returns the constant value associated with the declaration.
+func (v *VariableDecl) Value() ref.Val {
+ if v == nil {
+ return nil
+ }
+ return v.value
+}
+
+// DeclarationIsEquivalent returns true if one variable declaration has the same name and same type as the input.
+func (v *VariableDecl) DeclarationIsEquivalent(other *VariableDecl) bool {
+ if v == other {
+ return true
+ }
+ return v.Name() == other.Name() && v.Type().IsEquivalentType(other.Type())
+}
+
+// TypeVariable creates a new type identifier for use within a types.Provider.
+func TypeVariable(t *types.Type) *VariableDecl {
+ return NewVariable(t.TypeName(), types.NewTypeTypeWithParam(t))
+}
+
+// variableDeclToExprDecl converts a go-native variable declaration into a protobuf-type variable declaration.
+func variableDeclToExprDecl(v *VariableDecl) (*exprpb.Decl, error) {
+ varType, err := types.TypeToExprType(v.Type())
+ if err != nil {
+ return nil, err
+ }
+ return chkdecls.NewVar(v.Name(), varType), nil
+}
+
+// functionDeclToExprDecl converts a go-native function declaration into a protobuf-typed function declaration.
+func functionDeclToExprDecl(f *FunctionDecl) (*exprpb.Decl, error) {
+ overloads := make([]*exprpb.Decl_FunctionDecl_Overload, len(f.overloads))
+ for i, oID := range f.overloadOrdinals {
+ o := f.overloads[oID]
+ paramNames := map[string]struct{}{}
+ argTypes := make([]*exprpb.Type, len(o.ArgTypes()))
+ for j, a := range o.ArgTypes() {
+ collectParamNames(paramNames, a)
+ at, err := types.TypeToExprType(a)
+ if err != nil {
+ return nil, err
+ }
+ argTypes[j] = at
+ }
+ collectParamNames(paramNames, o.ResultType())
+ resultType, err := types.TypeToExprType(o.ResultType())
+ if err != nil {
+ return nil, err
+ }
+ if len(paramNames) == 0 {
+ if o.IsMemberFunction() {
+ overloads[i] = chkdecls.NewInstanceOverload(oID, argTypes, resultType)
+ } else {
+ overloads[i] = chkdecls.NewOverload(oID, argTypes, resultType)
+ }
+ } else {
+ params := []string{}
+ for pn := range paramNames {
+ params = append(params, pn)
+ }
+ if o.IsMemberFunction() {
+ overloads[i] = chkdecls.NewParameterizedInstanceOverload(oID, argTypes, resultType, params)
+ } else {
+ overloads[i] = chkdecls.NewParameterizedOverload(oID, argTypes, resultType, params)
+ }
+ }
+ }
+ return chkdecls.NewFunction(f.Name(), overloads...), nil
+}
+
+func collectParamNames(paramNames map[string]struct{}, arg *types.Type) {
+ if arg.Kind() == types.TypeParamKind {
+ paramNames[arg.TypeName()] = struct{}{}
+ }
+ for _, param := range arg.Parameters() {
+ collectParamNames(paramNames, param)
+ }
+}
+
+var (
+ emptyArgs = []*types.Type{}
+)
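
To see how the decls API above composes, here is a minimal, hedged sketch: declare size(string) -> int with NewFunction, MemberOverload, and UnaryBinding, then materialize the runtime bindings via Bindings(). types.StringType, types.IntType, and the value wrappers are assumed from cel-go's common/types package, which is not part of this hunk.

	package main

	import (
		"fmt"

		"github.com/google/cel-go/common/decls"
		"github.com/google/cel-go/common/types"
		"github.com/google/cel-go/common/types/ref"
	)

	func main() {
		// size(string) -> int as a member overload with a unary implementation.
		sizeFn, err := decls.NewFunction("size",
			decls.MemberOverload("string_size",
				[]*types.Type{types.StringType}, types.IntType,
				decls.UnaryBinding(func(arg ref.Val) ref.Val {
					return types.Int(len([]rune(string(arg.(types.String)))))
				})))
		if err != nil {
			panic(err)
		}
		// With a single overload, Bindings() also replicates the binding under
		// the function name itself (see the single-overload case above).
		bindings, err := sizeFn.Bindings()
		if err != nil {
			panic(err)
		}
		for _, b := range bindings {
			fmt.Println(b.Operator, b.Unary(types.String("héllo"))) // string_size 5, then size 5
		}
	}
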
diff --git a/vendor/github.com/google/cel-go/common/doc.go b/vendor/github.com/google/cel-go/common/doc.go
new file mode 100644
index 000000000..5362fdfe4
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/doc.go
@@ -0,0 +1,17 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package common defines types and utilities common to expression parsing,
+// checking, and interpretation.
+package common
diff --git a/vendor/github.com/google/cel-go/common/error.go b/vendor/github.com/google/cel-go/common/error.go
new file mode 100644
index 000000000..0cf21345e
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/error.go
@@ -0,0 +1,74 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+import (
+ "fmt"
+ "strings"
+ "unicode/utf8"
+)
+
+// NewError creates an error associated with an expression id with the given message at the given location.
+func NewError(id int64, message string, location Location) *Error {
+ return &Error{Message: message, Location: location, ExprID: id}
+}
+
+// Error type which references an expression id, a location within source, and a message.
+type Error struct {
+ Location Location
+ Message string
+ ExprID int64
+}
+
+const (
+ dot = "."
+ ind = "^"
+ wideDot = "\uff0e"
+ wideInd = "\uff3e"
+
+ // maxSnippetLength is the largest number of characters which can be rendered in an error message snippet.
+ maxSnippetLength = 16384
+)
+
+// ToDisplayString decorates the error message with the source location.
+func (e *Error) ToDisplayString(source Source) string {
+ var result = fmt.Sprintf("ERROR: %s:%d:%d: %s",
+ source.Description(),
+ e.Location.Line(),
+ e.Location.Column()+1, // add one to the 0-based column for display
+ e.Message)
+ if snippet, found := source.Snippet(e.Location.Line()); found && len(snippet) <= maxSnippetLength {
+ snippet := strings.Replace(snippet, "\t", " ", -1)
+ srcLine := "\n | " + snippet
+ var bytes = []byte(snippet)
+ var indLine = "\n | "
+ for i := 0; i < e.Location.Column() && len(bytes) > 0; i++ {
+ _, sz := utf8.DecodeRune(bytes)
+ bytes = bytes[sz:]
+ if sz > 1 {
+ indLine += wideDot
+ } else {
+ indLine += dot
+ }
+ }
+ if _, sz := utf8.DecodeRune(bytes); sz > 1 {
+ indLine += wideInd
+ } else {
+ indLine += ind
+ }
+ result += srcLine + indLine
+ }
+ return result
+}
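
A hedged usage sketch of the caret rendering above. common.NewTextSource and its "<input>" description are assumed from this package's source.go, which is not part of this hunk; the displayed column is the 0-based column plus one, and the indicator line mirrors the snippet rune-by-rune so wide characters stay aligned:

	src := common.NewTextSource("a.b &&& c")
	e := common.NewError(1, "unexpected token '&'", common.NewLocation(1, 6))
	fmt.Println(e.ToDisplayString(src))
	// ERROR: <input>:1:7: unexpected token '&'
	//  | a.b &&& c
	//  | ......^
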
diff --git a/vendor/github.com/google/cel-go/common/errors.go b/vendor/github.com/google/cel-go/common/errors.go
new file mode 100644
index 000000000..25adc73d8
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/errors.go
@@ -0,0 +1,103 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+)
+
+// Errors type which contains a list of errors observed during parsing.
+type Errors struct {
+ errors []*Error
+ source Source
+ numErrors int
+ maxErrorsToReport int
+}
+
+// NewErrors creates a new instance of the Errors type.
+func NewErrors(source Source) *Errors {
+ return &Errors{
+ errors: []*Error{},
+ source: source,
+ maxErrorsToReport: 100,
+ }
+}
+
+// ReportError records an error at a source location.
+func (e *Errors) ReportError(l Location, format string, args ...any) {
+ e.ReportErrorAtID(0, l, format, args...)
+}
+
+// ReportErrorAtID records an error at a source location and expression id.
+func (e *Errors) ReportErrorAtID(id int64, l Location, format string, args ...any) {
+ e.numErrors++
+ if e.numErrors > e.maxErrorsToReport {
+ return
+ }
+ err := &Error{
+ ExprID: id,
+ Location: l,
+ Message: fmt.Sprintf(format, args...),
+ }
+ e.errors = append(e.errors, err)
+}
+
+// GetErrors returns the list of observed errors.
+func (e *Errors) GetErrors() []*Error {
+ return e.errors[:]
+}
+
+// Append creates a new Errors object with the current and input errors.
+func (e *Errors) Append(errs []*Error) *Errors {
+ return &Errors{
+ errors: append(e.errors[:], errs...),
+ source: e.source,
+ numErrors: e.numErrors + len(errs),
+ maxErrorsToReport: e.maxErrorsToReport,
+ }
+}
+
+// ToDisplayString renders the error set as a newline-delimited string.
+func (e *Errors) ToDisplayString() string {
+ errorsInString := e.maxErrorsToReport
+ if e.numErrors > e.maxErrorsToReport {
+ // add one more error to indicate the number of errors truncated.
+ errorsInString++
+ } else {
+ // otherwise the error set will just contain the number of errors.
+ errorsInString = e.numErrors
+ }
+
+ result := make([]string, errorsInString)
+ sort.SliceStable(e.errors, func(i, j int) bool {
+ ei := e.errors[i].Location
+ ej := e.errors[j].Location
+ return ei.Line() < ej.Line() ||
+ (ei.Line() == ej.Line() && ei.Column() < ej.Column())
+ })
+ for i, err := range e.errors {
+		// This can happen during the append of two Errors objects
+ if i >= e.maxErrorsToReport {
+ break
+ }
+ result[i] = err.ToDisplayString(e.source)
+ }
+ if e.numErrors > e.maxErrorsToReport {
+ result[e.maxErrorsToReport] = fmt.Sprintf("%d more errors were truncated", e.numErrors-e.maxErrorsToReport)
+ }
+ return strings.Join(result, "\n")
+}
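
ReportErrorAtID keeps counting past maxErrorsToReport (100) but stops recording, and ToDisplayString sorts by line and column before rendering, appending an "N more errors were truncated" marker when the cap was hit. A short sketch under the same Source assumption as above:

	errs := common.NewErrors(common.NewTextSource("x ||\ny &&"))
	errs.ReportError(common.NewLocation(2, 4), "unexpected end of expression")
	errs.ReportError(common.NewLocation(1, 4), "unexpected end of line")
	// Rendered sorted by position: the line-1 error prints before line 2.
	fmt.Println(errs.ToDisplayString())
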
diff --git a/vendor/github.com/google/cel-go/common/functions/BUILD.bazel b/vendor/github.com/google/cel-go/common/functions/BUILD.bazel
new file mode 100644
index 000000000..3cc27d60c
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/functions/BUILD.bazel
@@ -0,0 +1,17 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+package(
+ default_visibility = ["//visibility:public"],
+ licenses = ["notice"], # Apache 2.0
+)
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "functions.go",
+ ],
+ importpath = "github.com/google/cel-go/common/functions",
+ deps = [
+ "//common/types/ref:go_default_library",
+ ],
+)
diff --git a/vendor/github.com/google/cel-go/common/functions/functions.go b/vendor/github.com/google/cel-go/common/functions/functions.go
new file mode 100644
index 000000000..67f4a5944
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/functions/functions.go
@@ -0,0 +1,61 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package functions defines the standard builtin functions supported by the interpreter
+package functions
+
+import "github.com/google/cel-go/common/types/ref"
+
+// Overload defines a named overload of a function, indicating an operand trait
+// which must be present on the first argument to the overload as well as one
+// of either a unary, binary, or function implementation.
+//
+// The majority of operators within the expression language are unary or binary
+// and the specializations simplify the call contract for implementers of
+// types with operator overloads. Any added complexity is assumed to be handled
+// by the generic FunctionOp.
+type Overload struct {
+ // Operator name as written in an expression or defined within
+ // operators.go.
+ Operator string
+
+ // Operand trait used to dispatch the call. The zero-value indicates a
+ // global function overload or that one of the Unary / Binary / Function
+ // definitions should be used to execute the call.
+ OperandTrait int
+
+ // Unary defines the overload with a UnaryOp implementation. May be nil.
+ Unary UnaryOp
+
+ // Binary defines the overload with a BinaryOp implementation. May be nil.
+ Binary BinaryOp
+
+ // Function defines the overload with a FunctionOp implementation. May be
+ // nil.
+ Function FunctionOp
+
+ // NonStrict specifies whether the Overload will tolerate arguments that
+ // are types.Err or types.Unknown.
+ NonStrict bool
+}
+
+// UnaryOp is a function that takes a single value and produces an output.
+type UnaryOp func(value ref.Val) ref.Val
+
+// BinaryOp is a function that takes two values and produces an output.
+type BinaryOp func(lhs ref.Val, rhs ref.Val) ref.Val
+
+// FunctionOp is a function which accepts zero or more arguments and produces
+// a value or error as a result.
+type FunctionOp func(values ...ref.Val) ref.Val
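
A minimal sketch of hand-rolling an Overload from this package; the dispatcher that consumes these bindings lives elsewhere in cel-go, so this only shows the shape of a binding. types.Int and types.NewErr are assumed from common/types:

	addInt64 := &functions.Overload{
		Operator: "add_int64",
		Binary: func(lhs, rhs ref.Val) ref.Val {
			l, lok := lhs.(types.Int)
			r, rok := rhs.(types.Int)
			if !lok || !rok {
				return types.NewErr("no such overload: add_int64")
			}
			return l + r
		},
	}
	_ = addInt64
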
diff --git a/vendor/github.com/google/cel-go/common/location.go b/vendor/github.com/google/cel-go/common/location.go
new file mode 100644
index 000000000..ec3fa7cb5
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/location.go
@@ -0,0 +1,51 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+// Location interface to represent a location within Source.
+type Location interface {
+ Line() int // 1-based line number within source.
+ Column() int // 0-based column number within source.
+}
+
+// SourceLocation helper type to manually construct a location.
+type SourceLocation struct {
+ line int
+ column int
+}
+
+var (
+	// SourceLocation implements the Location interface.
+ _ Location = &SourceLocation{}
+ // NoLocation is a particular illegal location.
+ NoLocation = &SourceLocation{-1, -1}
+)
+
+// NewLocation creates a new location.
+func NewLocation(line, column int) Location {
+ return &SourceLocation{
+ line: line,
+ column: column}
+}
+
+// Line returns the 1-based line of the location.
+func (l *SourceLocation) Line() int {
+ return l.line
+}
+
+// Column returns the 0-based column number of the location.
+func (l *SourceLocation) Column() int {
+ return l.column
+}
diff --git a/vendor/github.com/google/cel-go/common/operators/BUILD.bazel b/vendor/github.com/google/cel-go/common/operators/BUILD.bazel
new file mode 100644
index 000000000..b5b67f062
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/operators/BUILD.bazel
@@ -0,0 +1,14 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(
+ default_visibility = ["//visibility:public"],
+ licenses = ["notice"], # Apache 2.0
+)
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "operators.go",
+ ],
+ importpath = "github.com/google/cel-go/common/operators",
+)
diff --git a/vendor/github.com/google/cel-go/common/operators/operators.go b/vendor/github.com/google/cel-go/common/operators/operators.go
new file mode 100644
index 000000000..f9b39bda3
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/operators/operators.go
@@ -0,0 +1,157 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package operators defines the internal function names of operators.
+//
+// All operators in the expression language are modelled as function calls.
+package operators
+
+// String "names" for CEL operators.
+const (
+ // Symbolic operators.
+ Conditional = "_?_:_"
+ LogicalAnd = "_&&_"
+ LogicalOr = "_||_"
+ LogicalNot = "!_"
+ Equals = "_==_"
+ NotEquals = "_!=_"
+ Less = "_<_"
+ LessEquals = "_<=_"
+ Greater = "_>_"
+ GreaterEquals = "_>=_"
+ Add = "_+_"
+ Subtract = "_-_"
+ Multiply = "_*_"
+ Divide = "_/_"
+ Modulo = "_%_"
+ Negate = "-_"
+ Index = "_[_]"
+ OptIndex = "_[?_]"
+ OptSelect = "_?._"
+
+ // Macros, must have a valid identifier.
+ Has = "has"
+ All = "all"
+ Exists = "exists"
+ ExistsOne = "exists_one"
+ Map = "map"
+ Filter = "filter"
+
+	// Named operators, must not be valid identifiers.
+ NotStrictlyFalse = "@not_strictly_false"
+ In = "@in"
+
+ // Deprecated: named operators with valid identifiers.
+ OldNotStrictlyFalse = "__not_strictly_false__"
+ OldIn = "_in_"
+)
+
+var (
+ operators = map[string]string{
+ "+": Add,
+ "/": Divide,
+ "==": Equals,
+ ">": Greater,
+ ">=": GreaterEquals,
+ "in": In,
+ "<": Less,
+ "<=": LessEquals,
+ "%": Modulo,
+ "*": Multiply,
+ "!=": NotEquals,
+ "-": Subtract,
+ }
+ // operatorMap of the operator symbol which refers to a struct containing the display name,
+ // if applicable, the operator precedence, and the arity.
+ //
+ // If the symbol does not have a display name listed in the map, it is only because it requires
+ // special casing to render properly as text.
+ operatorMap = map[string]struct {
+ displayName string
+ precedence int
+ arity int
+ }{
+ Conditional: {displayName: "", precedence: 8, arity: 3},
+ LogicalOr: {displayName: "||", precedence: 7, arity: 2},
+ LogicalAnd: {displayName: "&&", precedence: 6, arity: 2},
+ Equals: {displayName: "==", precedence: 5, arity: 2},
+ Greater: {displayName: ">", precedence: 5, arity: 2},
+ GreaterEquals: {displayName: ">=", precedence: 5, arity: 2},
+ In: {displayName: "in", precedence: 5, arity: 2},
+ Less: {displayName: "<", precedence: 5, arity: 2},
+ LessEquals: {displayName: "<=", precedence: 5, arity: 2},
+ NotEquals: {displayName: "!=", precedence: 5, arity: 2},
+ OldIn: {displayName: "in", precedence: 5, arity: 2},
+ Add: {displayName: "+", precedence: 4, arity: 2},
+ Subtract: {displayName: "-", precedence: 4, arity: 2},
+ Divide: {displayName: "/", precedence: 3, arity: 2},
+ Modulo: {displayName: "%", precedence: 3, arity: 2},
+ Multiply: {displayName: "*", precedence: 3, arity: 2},
+ LogicalNot: {displayName: "!", precedence: 2, arity: 1},
+ Negate: {displayName: "-", precedence: 2, arity: 1},
+ Index: {displayName: "", precedence: 1, arity: 2},
+ OptIndex: {displayName: "", precedence: 1, arity: 2},
+ OptSelect: {displayName: "", precedence: 1, arity: 2},
+ }
+)
+
+// Find the internal function name for an operator, if the input text is one.
+func Find(text string) (string, bool) {
+ op, found := operators[text]
+ return op, found
+}
+
+// FindReverse returns the unmangled text representation of the operator.
+func FindReverse(symbol string) (string, bool) {
+ op, found := operatorMap[symbol]
+ if !found {
+ return "", false
+ }
+ return op.displayName, true
+}
+
+// FindReverseBinaryOperator returns the unmangled text representation of a binary operator.
+//
+// If the symbol does refer to an operator but the operator does not have a display name, the
+// result is false.
+func FindReverseBinaryOperator(symbol string) (string, bool) {
+ op, found := operatorMap[symbol]
+ if !found || op.arity != 2 {
+ return "", false
+ }
+ if op.displayName == "" {
+ return "", false
+ }
+ return op.displayName, true
+}
+
+// Precedence returns the operator precedence, where a higher number indicates a
+// higher-precedence operation.
+func Precedence(symbol string) int {
+ op, found := operatorMap[symbol]
+ if !found {
+ return 0
+ }
+ return op.precedence
+}
+
+// Arity returns the number of arguments the operator takes.
+// -1 is returned if an undefined symbol is provided.
+func Arity(symbol string) int {
+ op, found := operatorMap[symbol]
+ if !found {
+ return -1
+ }
+ return op.arity
+}
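
The lookup helpers above are straight table reads; the following values fall directly out of the operators and operatorMap tables:

	op, ok := operators.Find("<=")                     // op == "_<=_", ok == true
	prec := operators.Precedence(operators.LogicalAnd) // prec == 6
	disp, has := operators.FindReverseBinaryOperator(operators.Index)
	// disp == "", has == false: Index is binary but has no display name.
	arity := operators.Arity(operators.LogicalNot)     // arity == 1
	fmt.Println(op, ok, prec, disp, has, arity)
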
diff --git a/vendor/github.com/google/cel-go/common/overloads/BUILD.bazel b/vendor/github.com/google/cel-go/common/overloads/BUILD.bazel
new file mode 100644
index 000000000..e46e2f483
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/overloads/BUILD.bazel
@@ -0,0 +1,14 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+package(
+ default_visibility = ["//visibility:public"],
+ licenses = ["notice"], # Apache 2.0
+)
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "overloads.go",
+ ],
+ importpath = "github.com/google/cel-go/common/overloads",
+)
diff --git a/vendor/github.com/google/cel-go/common/overloads/overloads.go b/vendor/github.com/google/cel-go/common/overloads/overloads.go
new file mode 100644
index 000000000..9d50f4367
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/overloads/overloads.go
@@ -0,0 +1,327 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package overloads defines the internal overload identifiers for function and
+// operator overloads.
+package overloads
+
+// Boolean logic overloads
+const (
+ Conditional = "conditional"
+ LogicalAnd = "logical_and"
+ LogicalOr = "logical_or"
+ LogicalNot = "logical_not"
+ NotStrictlyFalse = "not_strictly_false"
+ Equals = "equals"
+ NotEquals = "not_equals"
+ LessBool = "less_bool"
+ LessInt64 = "less_int64"
+ LessInt64Double = "less_int64_double"
+ LessInt64Uint64 = "less_int64_uint64"
+ LessUint64 = "less_uint64"
+ LessUint64Double = "less_uint64_double"
+ LessUint64Int64 = "less_uint64_int64"
+ LessDouble = "less_double"
+ LessDoubleInt64 = "less_double_int64"
+ LessDoubleUint64 = "less_double_uint64"
+ LessString = "less_string"
+ LessBytes = "less_bytes"
+ LessTimestamp = "less_timestamp"
+ LessDuration = "less_duration"
+ LessEqualsBool = "less_equals_bool"
+ LessEqualsInt64 = "less_equals_int64"
+ LessEqualsInt64Double = "less_equals_int64_double"
+ LessEqualsInt64Uint64 = "less_equals_int64_uint64"
+ LessEqualsUint64 = "less_equals_uint64"
+ LessEqualsUint64Double = "less_equals_uint64_double"
+ LessEqualsUint64Int64 = "less_equals_uint64_int64"
+ LessEqualsDouble = "less_equals_double"
+ LessEqualsDoubleInt64 = "less_equals_double_int64"
+ LessEqualsDoubleUint64 = "less_equals_double_uint64"
+ LessEqualsString = "less_equals_string"
+ LessEqualsBytes = "less_equals_bytes"
+ LessEqualsTimestamp = "less_equals_timestamp"
+ LessEqualsDuration = "less_equals_duration"
+ GreaterBool = "greater_bool"
+ GreaterInt64 = "greater_int64"
+ GreaterInt64Double = "greater_int64_double"
+ GreaterInt64Uint64 = "greater_int64_uint64"
+ GreaterUint64 = "greater_uint64"
+ GreaterUint64Double = "greater_uint64_double"
+ GreaterUint64Int64 = "greater_uint64_int64"
+ GreaterDouble = "greater_double"
+ GreaterDoubleInt64 = "greater_double_int64"
+ GreaterDoubleUint64 = "greater_double_uint64"
+ GreaterString = "greater_string"
+ GreaterBytes = "greater_bytes"
+ GreaterTimestamp = "greater_timestamp"
+ GreaterDuration = "greater_duration"
+ GreaterEqualsBool = "greater_equals_bool"
+ GreaterEqualsInt64 = "greater_equals_int64"
+ GreaterEqualsInt64Double = "greater_equals_int64_double"
+ GreaterEqualsInt64Uint64 = "greater_equals_int64_uint64"
+ GreaterEqualsUint64 = "greater_equals_uint64"
+ GreaterEqualsUint64Double = "greater_equals_uint64_double"
+ GreaterEqualsUint64Int64 = "greater_equals_uint64_int64"
+ GreaterEqualsDouble = "greater_equals_double"
+ GreaterEqualsDoubleInt64 = "greater_equals_double_int64"
+ GreaterEqualsDoubleUint64 = "greater_equals_double_uint64"
+ GreaterEqualsString = "greater_equals_string"
+ GreaterEqualsBytes = "greater_equals_bytes"
+ GreaterEqualsTimestamp = "greater_equals_timestamp"
+ GreaterEqualsDuration = "greater_equals_duration"
+)
+
+// Math overloads
+const (
+ AddInt64 = "add_int64"
+ AddUint64 = "add_uint64"
+ AddDouble = "add_double"
+ AddString = "add_string"
+ AddBytes = "add_bytes"
+ AddList = "add_list"
+ AddTimestampDuration = "add_timestamp_duration"
+ AddDurationTimestamp = "add_duration_timestamp"
+ AddDurationDuration = "add_duration_duration"
+ SubtractInt64 = "subtract_int64"
+ SubtractUint64 = "subtract_uint64"
+ SubtractDouble = "subtract_double"
+ SubtractTimestampTimestamp = "subtract_timestamp_timestamp"
+ SubtractTimestampDuration = "subtract_timestamp_duration"
+ SubtractDurationDuration = "subtract_duration_duration"
+ MultiplyInt64 = "multiply_int64"
+ MultiplyUint64 = "multiply_uint64"
+ MultiplyDouble = "multiply_double"
+ DivideInt64 = "divide_int64"
+ DivideUint64 = "divide_uint64"
+ DivideDouble = "divide_double"
+ ModuloInt64 = "modulo_int64"
+ ModuloUint64 = "modulo_uint64"
+ NegateInt64 = "negate_int64"
+ NegateDouble = "negate_double"
+)
+
+// Index overloads
+const (
+ IndexList = "index_list"
+ IndexMap = "index_map"
+ IndexMessage = "index_message" // TODO: introduce concept of types.Message
+)
+
+// In operators
+const (
+ DeprecatedIn = "in"
+ InList = "in_list"
+ InMap = "in_map"
+ InMessage = "in_message" // TODO: introduce concept of types.Message
+)
+
+// Size overloads
+const (
+ Size = "size"
+ SizeString = "size_string"
+ SizeBytes = "size_bytes"
+ SizeList = "size_list"
+ SizeMap = "size_map"
+ SizeStringInst = "string_size"
+ SizeBytesInst = "bytes_size"
+ SizeListInst = "list_size"
+ SizeMapInst = "map_size"
+)
+
+// String function names.
+const (
+ Contains = "contains"
+ EndsWith = "endsWith"
+ Matches = "matches"
+ StartsWith = "startsWith"
+)
+
+// Extension function overloads with complex behaviors that need to be referenced in runtime and static analysis cost computations.
+const (
+ ExtQuoteString = "strings_quote"
+)
+
+// String function overload names.
+const (
+ ContainsString = "contains_string"
+ EndsWithString = "ends_with_string"
+ MatchesString = "matches_string"
+ StartsWithString = "starts_with_string"
+)
+
+// Extension function overloads with complex behaviors that need to be referenced in runtime and static analysis cost computations.
+const (
+ ExtFormatString = "string_format"
+)
+
+// Time-based functions.
+const (
+ TimeGetFullYear = "getFullYear"
+ TimeGetMonth = "getMonth"
+ TimeGetDayOfYear = "getDayOfYear"
+ TimeGetDate = "getDate"
+ TimeGetDayOfMonth = "getDayOfMonth"
+ TimeGetDayOfWeek = "getDayOfWeek"
+ TimeGetHours = "getHours"
+ TimeGetMinutes = "getMinutes"
+ TimeGetSeconds = "getSeconds"
+ TimeGetMilliseconds = "getMilliseconds"
+)
+
+// Timestamp overloads for time functions without timezones.
+const (
+ TimestampToYear = "timestamp_to_year"
+ TimestampToMonth = "timestamp_to_month"
+ TimestampToDayOfYear = "timestamp_to_day_of_year"
+ TimestampToDayOfMonthZeroBased = "timestamp_to_day_of_month"
+ TimestampToDayOfMonthOneBased = "timestamp_to_day_of_month_1_based"
+ TimestampToDayOfWeek = "timestamp_to_day_of_week"
+ TimestampToHours = "timestamp_to_hours"
+ TimestampToMinutes = "timestamp_to_minutes"
+ TimestampToSeconds = "timestamp_to_seconds"
+ TimestampToMilliseconds = "timestamp_to_milliseconds"
+)
+
+// Timestamp overloads for time functions with timezones.
+const (
+ TimestampToYearWithTz = "timestamp_to_year_with_tz"
+ TimestampToMonthWithTz = "timestamp_to_month_with_tz"
+ TimestampToDayOfYearWithTz = "timestamp_to_day_of_year_with_tz"
+ TimestampToDayOfMonthZeroBasedWithTz = "timestamp_to_day_of_month_with_tz"
+ TimestampToDayOfMonthOneBasedWithTz = "timestamp_to_day_of_month_1_based_with_tz"
+ TimestampToDayOfWeekWithTz = "timestamp_to_day_of_week_with_tz"
+ TimestampToHoursWithTz = "timestamp_to_hours_with_tz"
+ TimestampToMinutesWithTz = "timestamp_to_minutes_with_tz"
+ TimestampToSecondsWithTz = "timestamp_to_seconds_tz"
+ TimestampToMillisecondsWithTz = "timestamp_to_milliseconds_with_tz"
+)
+
+// Duration overloads for time functions.
+const (
+ DurationToHours = "duration_to_hours"
+ DurationToMinutes = "duration_to_minutes"
+ DurationToSeconds = "duration_to_seconds"
+ DurationToMilliseconds = "duration_to_milliseconds"
+)
+
+// Type conversion methods and overloads
+const (
+ TypeConvertInt = "int"
+ TypeConvertUint = "uint"
+ TypeConvertDouble = "double"
+ TypeConvertBool = "bool"
+ TypeConvertString = "string"
+ TypeConvertBytes = "bytes"
+ TypeConvertTimestamp = "timestamp"
+ TypeConvertDuration = "duration"
+ TypeConvertType = "type"
+ TypeConvertDyn = "dyn"
+)
+
+// Int conversion functions.
+const (
+ IntToInt = "int64_to_int64"
+ UintToInt = "uint64_to_int64"
+ DoubleToInt = "double_to_int64"
+ StringToInt = "string_to_int64"
+ TimestampToInt = "timestamp_to_int64"
+ DurationToInt = "duration_to_int64"
+)
+
+// Uint conversion functions.
+const (
+ UintToUint = "uint64_to_uint64"
+ IntToUint = "int64_to_uint64"
+ DoubleToUint = "double_to_uint64"
+ StringToUint = "string_to_uint64"
+)
+
+// Double conversion functions.
+const (
+ DoubleToDouble = "double_to_double"
+ IntToDouble = "int64_to_double"
+ UintToDouble = "uint64_to_double"
+ StringToDouble = "string_to_double"
+)
+
+// Bool conversion functions.
+const (
+ BoolToBool = "bool_to_bool"
+ StringToBool = "string_to_bool"
+)
+
+// Bytes conversion functions.
+const (
+ BytesToBytes = "bytes_to_bytes"
+ StringToBytes = "string_to_bytes"
+)
+
+// String conversion functions.
+const (
+ StringToString = "string_to_string"
+ BoolToString = "bool_to_string"
+ IntToString = "int64_to_string"
+ UintToString = "uint64_to_string"
+ DoubleToString = "double_to_string"
+ BytesToString = "bytes_to_string"
+ TimestampToString = "timestamp_to_string"
+ DurationToString = "duration_to_string"
+)
+
+// Timestamp conversion functions
+const (
+ TimestampToTimestamp = "timestamp_to_timestamp"
+ StringToTimestamp = "string_to_timestamp"
+ IntToTimestamp = "int64_to_timestamp"
+)
+
+// Duration conversion functions.
+const (
+ DurationToDuration = "duration_to_duration"
+ StringToDuration = "string_to_duration"
+ IntToDuration = "int64_to_duration"
+)
+
+// Convert to dyn
+const (
+ ToDyn = "to_dyn"
+)
+
+// Comprehension helper methods, not directly accessible to a developer.
+const (
+ Iterator = "@iterator"
+ HasNext = "@hasNext"
+ Next = "@next"
+)
+
+// IsTypeConversionFunction returns whether the input function is a standard library type
+// conversion function.
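+//
+// For example, IsTypeConversionFunction("int") returns true, while
+// IsTypeConversionFunction("int64_to_int64") returns false: the check matches
+// conversion function names such as TypeConvertInt, not per-type overload IDs.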
+func IsTypeConversionFunction(function string) bool {
+ switch function {
+ case TypeConvertBool,
+ TypeConvertBytes,
+ TypeConvertDouble,
+ TypeConvertDuration,
+ TypeConvertDyn,
+ TypeConvertInt,
+ TypeConvertString,
+ TypeConvertTimestamp,
+ TypeConvertType,
+ TypeConvertUint:
+ return true
+ default:
+ return false
+ }
+}
diff --git a/vendor/github.com/google/cel-go/common/runes/BUILD.bazel b/vendor/github.com/google/cel-go/common/runes/BUILD.bazel
new file mode 100644
index 000000000..bb30242cf
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/runes/BUILD.bazel
@@ -0,0 +1,25 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(
+ default_visibility = ["//visibility:public"],
+ licenses = ["notice"], # Apache 2.0
+)
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "buffer.go",
+ ],
+ importpath = "github.com/google/cel-go/common/runes",
+)
+
+go_test(
+ name = "go_default_test",
+ size = "small",
+ srcs = [
+ "buffer_test.go",
+ ],
+ embed = [
+ ":go_default_library",
+ ],
+)
diff --git a/vendor/github.com/google/cel-go/common/runes/buffer.go b/vendor/github.com/google/cel-go/common/runes/buffer.go
new file mode 100644
index 000000000..021198224
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/runes/buffer.go
@@ -0,0 +1,242 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package runes provides interfaces and utilities for working with runes.
+package runes
+
+import (
+ "strings"
+ "unicode/utf8"
+)
+
+// Buffer is an interface for accessing a contiguous array of code points.
+type Buffer interface {
+ Get(i int) rune
+ Slice(i, j int) string
+ Len() int
+}
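+
+// As an illustration of the code-point indexing above: for a Buffer built
+// from "héllo", Get(1) yields 'é' and Slice(1, 3) yields "él"; indices count
+// code points rather than bytes.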
+
+type emptyBuffer struct{}
+
+func (e *emptyBuffer) Get(i int) rune {
+ panic("slice index out of bounds")
+}
+
+func (e *emptyBuffer) Slice(i, j int) string {
+ if i != 0 || i != j {
+ panic("slice index out of bounds")
+ }
+ return ""
+}
+
+func (e *emptyBuffer) Len() int {
+ return 0
+}
+
+var _ Buffer = &emptyBuffer{}
+
+// asciiBuffer is a Buffer implementation for text whose code points all fall
+// within the ASCII character set.
+type asciiBuffer struct {
+ arr []byte
+}
+
+func (a *asciiBuffer) Get(i int) rune {
+ return rune(uint32(a.arr[i]))
+}
+
+func (a *asciiBuffer) Slice(i, j int) string {
+ return string(a.arr[i:j])
+}
+
+func (a *asciiBuffer) Len() int {
+ return len(a.arr)
+}
+
+var _ Buffer = &asciiBuffer{}
+
+// basicBuffer is a Buffer implementation for text containing code points from
+// the Latin-1 character set and the Basic Multilingual Plane.
+type basicBuffer struct {
+ arr []uint16
+}
+
+func (b *basicBuffer) Get(i int) rune {
+ return rune(uint32(b.arr[i]))
+}
+
+func (b *basicBuffer) Slice(i, j int) string {
+ var str strings.Builder
+ str.Grow((j - i) * 3) // Worst case encoding size for 0xffff is 3.
+ for ; i < j; i++ {
+ str.WriteRune(rune(uint32(b.arr[i])))
+ }
+ return str.String()
+}
+
+func (b *basicBuffer) Len() int {
+ return len(b.arr)
+}
+
+var _ Buffer = &basicBuffer{}
+
+// supplementalBuffer is a Buffer implementation for text containing code points
+// from the Latin-1 character set, the Basic Multilingual Plane, or the
+// Supplemental Multilingual Plane.
+type supplementalBuffer struct {
+ arr []rune
+}
+
+func (s *supplementalBuffer) Get(i int) rune {
+ return rune(uint32(s.arr[i]))
+}
+
+func (s *supplementalBuffer) Slice(i, j int) string {
+ return string(s.arr[i:j])
+}
+
+func (s *supplementalBuffer) Len() int {
+ return len(s.arr)
+}
+
+var _ Buffer = &supplementalBuffer{}
+
+var nilBuffer = &emptyBuffer{}
+
+// NewBuffer returns an efficient implementation of Buffer for the given text based on the ranges of
+// the encoded code points contained within.
+//
+// Code points are represented as an array of byte, uint16, or rune. This approach ensures that
+// each index represents a code point by itself without needing to use an array of rune. At first
+// we assume all code points are less than or equal to '\u007f'. If this holds true, the
+// underlying storage is a byte array containing only ASCII characters. If we encounter a code
+// point above this range but less than or equal to '\uffff', we allocate a uint16 array, copy the
+// elements of the previous byte array to the uint16 array, and continue. If no higher code point
+// is found, the underlying storage is a uint16 array containing only Unicode characters in the
+// Basic Multilingual Plane. If we encounter a code point above '\uffff', we allocate a rune array,
+// copy the previous elements of the byte or uint16 array, and continue. The underlying storage is
+// then a rune array containing any Unicode character.
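+//
+// For example, given the selection rules above, NewBuffer("hello") is expected
+// to be backed by a byte array, NewBuffer("héllo") by a uint16 array, and
+// NewBuffer("h😀llo") by a rune array.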
+func NewBuffer(data string) Buffer {
+ buf, _ := newBuffer(data, false)
+ return buf
+}
+
+// NewBufferAndLineOffsets returns an efficient implementation of Buffer for the given text based on
+// the ranges of the encoded code points contained within, as well as returning the line offsets.
+//
+// Code points are represented as an array of byte, uint16, or rune. This approach ensures that
+// each index represents a code point by itself without needing to use an array of rune. At first
+// we assume all code points are less than or equal to '\u007f'. If this holds true, the
+// underlying storage is a byte array containing only ASCII characters. If we encounter a code
+// point above this range but less than or equal to '\uffff', we allocate a uint16 array, copy the
+// elements of the previous byte array to the uint16 array, and continue. If no higher code point
+// is found, the underlying storage is a uint16 array containing only Unicode characters in the
+// Basic Multilingual Plane. If we encounter a code point above '\uffff', we allocate a rune array,
+// copy the previous elements of the byte or uint16 array, and continue. The underlying storage is
+// then a rune array containing any Unicode character.
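+//
+// For example, NewBufferAndLineOffsets("a\nbc") is expected to yield an
+// ASCII-backed buffer together with the offsets [2, 5]: one entry just past
+// each '\n', plus a final entry one past the end of the input.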
+func NewBufferAndLineOffsets(data string) (Buffer, []int32) {
+ return newBuffer(data, true)
+}
+
+func newBuffer(data string, lines bool) (Buffer, []int32) {
+ if len(data) == 0 {
+ return nilBuffer, []int32{0}
+ }
+ var (
+ idx = 0
+ off int32 = 0
+ buf8 = make([]byte, 0, len(data))
+ buf16 []uint16
+ buf32 []rune
+ offs []int32
+ )
+ for idx < len(data) {
+ r, s := utf8.DecodeRuneInString(data[idx:])
+ idx += s
+ if lines && r == '\n' {
+ offs = append(offs, off+1)
+ }
+ if r < utf8.RuneSelf {
+ buf8 = append(buf8, byte(r))
+ off++
+ continue
+ }
+ if r <= 0xffff {
+ buf16 = make([]uint16, len(buf8), len(data))
+ for i, v := range buf8 {
+ buf16[i] = uint16(v)
+ }
+ buf8 = nil
+ buf16 = append(buf16, uint16(r))
+ off++
+ goto copy16
+ }
+ buf32 = make([]rune, len(buf8), len(data))
+ for i, v := range buf8 {
+ buf32[i] = rune(uint32(v))
+ }
+ buf8 = nil
+ buf32 = append(buf32, r)
+ off++
+ goto copy32
+ }
+ if lines {
+ offs = append(offs, off+1)
+ }
+ return &asciiBuffer{
+ arr: buf8,
+ }, offs
+copy16:
+ for idx < len(data) {
+ r, s := utf8.DecodeRuneInString(data[idx:])
+ idx += s
+ if lines && r == '\n' {
+ offs = append(offs, off+1)
+ }
+ if r <= 0xffff {
+ buf16 = append(buf16, uint16(r))
+ off++
+ continue
+ }
+ buf32 = make([]rune, len(buf16), len(data))
+ for i, v := range buf16 {
+ buf32[i] = rune(uint32(v))
+ }
+ buf16 = nil
+ buf32 = append(buf32, r)
+ off++
+ goto copy32
+ }
+ if lines {
+ offs = append(offs, off+1)
+ }
+ return &basicBuffer{
+ arr: buf16,
+ }, offs
+copy32:
+ for idx < len(data) {
+ r, s := utf8.DecodeRuneInString(data[idx:])
+ idx += s
+ if lines && r == '\n' {
+ offs = append(offs, off+1)
+ }
+ buf32 = append(buf32, r)
+ off++
+ }
+ if lines {
+ offs = append(offs, off+1)
+ }
+ return &supplementalBuffer{
+ arr: buf32,
+ }, offs
+}
diff --git a/vendor/github.com/google/cel-go/common/source.go b/vendor/github.com/google/cel-go/common/source.go
new file mode 100644
index 000000000..ec79cb545
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/source.go
@@ -0,0 +1,173 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+import (
+ "github.com/google/cel-go/common/runes"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+// Source interface for expression source contents.
+type Source interface {
+ // Content returns the source content represented as a string.
+ // Example contents include the contents of a single file, a textbox
+ // field, or a URL parameter.
+ Content() string
+
+ // Description gives a brief description of the source.
+ // Example descriptions are a file name or ui element.
+ Description() string
+
+ // LineOffsets gives the character offsets at which lines occur.
+ // The zero-th entry should refer to the break between the first
+ // and second line, or EOF if there is only one line of source.
+ LineOffsets() []int32
+
+ // LocationOffset translates a Location to an offset.
+ // Given the line and column of the Location returns the
+ // Location's character offset in the Source, and a bool
+ // indicating whether the Location was found.
+ LocationOffset(location Location) (int32, bool)
+
+ // OffsetLocation translates a character offset to a Location, or
+ // false if the conversion was not feasible.
+ OffsetLocation(offset int32) (Location, bool)
+
+ // NewLocation takes an input line and column and produces a Location.
+ // The default behavior is to treat the line and column as absolute,
+ // but concrete derivations may use this method to convert a relative
+ // line and column position into an absolute location.
+ NewLocation(line, col int) Location
+
+ // Snippet returns a line of content and whether the line was found.
+ Snippet(line int) (string, bool)
+}
+
+// sourceImpl is the implementation of the Source interface.
+type sourceImpl struct {
+ runes.Buffer
+ description string
+ lineOffsets []int32
+}
+
+var _ runes.Buffer = &sourceImpl{}
+
+// TODO(jimlarson) "Character offsets" should index the code points
+// within the UTF-8 encoded string. It currently indexes bytes.
+// This can be accomplished by using []rune instead of string for contents.
+
+// NewTextSource creates a new Source from the input text string.
+func NewTextSource(text string) Source {
+ return NewStringSource(text, "")
+}
+
+// NewStringSource creates a new Source from the given contents and description.
+func NewStringSource(contents string, description string) Source {
+ // Compute line offsets up front as they are referred to frequently.
+ buf, offs := runes.NewBufferAndLineOffsets(contents)
+ return &sourceImpl{
+ Buffer: buf,
+ description: description,
+ lineOffsets: offs,
+ }
+}
+
+// NewInfoSource creates a new Source from a SourceInfo.
+func NewInfoSource(info *exprpb.SourceInfo) Source {
+ return &sourceImpl{
+ Buffer: runes.NewBuffer(""),
+ description: info.GetLocation(),
+ lineOffsets: info.GetLineOffsets(),
+ }
+}
+
+// Content implements the Source interface method.
+func (s *sourceImpl) Content() string {
+ return s.Slice(0, s.Len())
+}
+
+// Description implements the Source interface method.
+func (s *sourceImpl) Description() string {
+ return s.description
+}
+
+// LineOffsets implements the Source interface method.
+func (s *sourceImpl) LineOffsets() []int32 {
+ return s.lineOffsets
+}
+
+// LocationOffset implements the Source interface method.
+func (s *sourceImpl) LocationOffset(location Location) (int32, bool) {
+ if lineOffset, found := s.findLineOffset(location.Line()); found {
+ return lineOffset + int32(location.Column()), true
+ }
+ return -1, false
+}
+
+// NewLocation implements the Source interface method.
+func (s *sourceImpl) NewLocation(line, col int) Location {
+ return NewLocation(line, col)
+}
+
+// OffsetLocation implements the Source interface method.
+func (s *sourceImpl) OffsetLocation(offset int32) (Location, bool) {
+ line, lineOffset := s.findLine(offset)
+ return NewLocation(int(line), int(offset-lineOffset)), true
+}
+
+// Snippet implements the Source interface method.
+func (s *sourceImpl) Snippet(line int) (string, bool) {
+ charStart, found := s.findLineOffset(line)
+ if !found || s.Len() == 0 {
+ return "", false
+ }
+ charEnd, found := s.findLineOffset(line + 1)
+ if found {
+ return s.Slice(int(charStart), int(charEnd-1)), true
+ }
+ return s.Slice(int(charStart), s.Len()), true
+}
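+
+// Illustration of the implementation above: for NewTextSource("hello\nworld"),
+// LineOffsets() is expected to return [6, 12], Snippet(1) returns
+// ("hello", true), and Snippet(3) returns ("", false).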
+
+// findLineOffset returns the offset where the (1-indexed) line begins,
+// or false if the line doesn't exist.
+func (s *sourceImpl) findLineOffset(line int) (int32, bool) {
+ if line == 1 {
+ return 0, true
+ }
+ if line > 1 && line <= int(len(s.lineOffsets)) {
+ offset := s.lineOffsets[line-2]
+ return offset, true
+ }
+ return -1, false
+}
+
+// findLine finds the line that contains the given character offset and
+// returns the line number and offset of the beginning of that line.
+// Note that the last line is treated as if it contains all offsets
+// beyond the end of the actual source.
+func (s *sourceImpl) findLine(characterOffset int32) (int32, int32) {
+ var line int32 = 1
+ for _, lineOffset := range s.lineOffsets {
+ if lineOffset > characterOffset {
+ break
+ }
+ line++
+ }
+ if line == 1 {
+ return line, 0
+ }
+ return line, s.lineOffsets[line-2]
+}
diff --git a/vendor/github.com/google/cel-go/common/stdlib/BUILD.bazel b/vendor/github.com/google/cel-go/common/stdlib/BUILD.bazel
new file mode 100644
index 000000000..b55f45215
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/stdlib/BUILD.bazel
@@ -0,0 +1,23 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(
+ default_visibility = ["//visibility:public"],
+ licenses = ["notice"], # Apache 2.0
+)
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "standard.go",
+ ],
+ importpath = "github.com/google/cel-go/common/stdlib",
+ deps = [
+ "//common/decls:go_default_library",
+ "//common/functions:go_default_library",
+ "//common/operators:go_default_library",
+ "//common/overloads:go_default_library",
+ "//common/types:go_default_library",
+ "//common/types/ref:go_default_library",
+ "//common/types/traits:go_default_library",
+ ],
+)
\ No newline at end of file
diff --git a/vendor/github.com/google/cel-go/common/stdlib/standard.go b/vendor/github.com/google/cel-go/common/stdlib/standard.go
new file mode 100644
index 000000000..1550c1786
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/stdlib/standard.go
@@ -0,0 +1,620 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package stdlib contains all of the standard library function declarations and definitions for CEL.
+package stdlib
+
+import (
+ "github.com/google/cel-go/common/decls"
+ "github.com/google/cel-go/common/functions"
+ "github.com/google/cel-go/common/operators"
+ "github.com/google/cel-go/common/overloads"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+)
+
+var (
+ stdFunctions []*decls.FunctionDecl
+ stdTypes []*decls.VariableDecl
+)
+
+func init() {
+ paramA := types.NewTypeParamType("A")
+ paramB := types.NewTypeParamType("B")
+ listOfA := types.NewListType(paramA)
+ mapOfAB := types.NewMapType(paramA, paramB)
+
+ stdTypes = []*decls.VariableDecl{
+ decls.TypeVariable(types.BoolType),
+ decls.TypeVariable(types.BytesType),
+ decls.TypeVariable(types.DoubleType),
+ decls.TypeVariable(types.DurationType),
+ decls.TypeVariable(types.IntType),
+ decls.TypeVariable(listOfA),
+ decls.TypeVariable(mapOfAB),
+ decls.TypeVariable(types.NullType),
+ decls.TypeVariable(types.StringType),
+ decls.TypeVariable(types.TimestampType),
+ decls.TypeVariable(types.TypeType),
+ decls.TypeVariable(types.UintType),
+ }
+
+ stdFunctions = []*decls.FunctionDecl{
+ // Logical operators. Special-cased within the interpreter.
+ // Note, the singleton binding prevents extensions from overriding the operator behavior.
+ function(operators.Conditional,
+ decls.Overload(overloads.Conditional, argTypes(types.BoolType, paramA, paramA), paramA,
+ decls.OverloadIsNonStrict()),
+ decls.SingletonFunctionBinding(noFunctionOverrides)),
+ function(operators.LogicalAnd,
+ decls.Overload(overloads.LogicalAnd, argTypes(types.BoolType, types.BoolType), types.BoolType,
+ decls.OverloadIsNonStrict()),
+ decls.SingletonBinaryBinding(noBinaryOverrides)),
+ function(operators.LogicalOr,
+ decls.Overload(overloads.LogicalOr, argTypes(types.BoolType, types.BoolType), types.BoolType,
+ decls.OverloadIsNonStrict()),
+ decls.SingletonBinaryBinding(noBinaryOverrides)),
+ function(operators.LogicalNot,
+ decls.Overload(overloads.LogicalNot, argTypes(types.BoolType), types.BoolType),
+ decls.SingletonUnaryBinding(func(val ref.Val) ref.Val {
+ b, ok := val.(types.Bool)
+ if !ok {
+ return types.MaybeNoSuchOverloadErr(val)
+ }
+ return b.Negate()
+ })),
+
+ // Comprehension short-circuiting related function
+ function(operators.NotStrictlyFalse,
+ decls.Overload(overloads.NotStrictlyFalse, argTypes(types.BoolType), types.BoolType,
+ decls.OverloadIsNonStrict(),
+ decls.UnaryBinding(notStrictlyFalse))),
+ // Deprecated: __not_strictly_false__
+ function(operators.OldNotStrictlyFalse,
+ decls.DisableDeclaration(true), // safe deprecation
+ decls.Overload(operators.OldNotStrictlyFalse, argTypes(types.BoolType), types.BoolType,
+ decls.OverloadIsNonStrict(),
+ decls.UnaryBinding(notStrictlyFalse))),
+
+ // Equality / inequality. Special-cased in the interpreter
+ function(operators.Equals,
+ decls.Overload(overloads.Equals, argTypes(paramA, paramA), types.BoolType),
+ decls.SingletonBinaryBinding(noBinaryOverrides)),
+ function(operators.NotEquals,
+ decls.Overload(overloads.NotEquals, argTypes(paramA, paramA), types.BoolType),
+ decls.SingletonBinaryBinding(noBinaryOverrides)),
+
+ // Mathematical operators
+ function(operators.Add,
+ decls.Overload(overloads.AddBytes,
+ argTypes(types.BytesType, types.BytesType), types.BytesType),
+ decls.Overload(overloads.AddDouble,
+ argTypes(types.DoubleType, types.DoubleType), types.DoubleType),
+ decls.Overload(overloads.AddDurationDuration,
+ argTypes(types.DurationType, types.DurationType), types.DurationType),
+ decls.Overload(overloads.AddDurationTimestamp,
+ argTypes(types.DurationType, types.TimestampType), types.TimestampType),
+ decls.Overload(overloads.AddTimestampDuration,
+ argTypes(types.TimestampType, types.DurationType), types.TimestampType),
+ decls.Overload(overloads.AddInt64,
+ argTypes(types.IntType, types.IntType), types.IntType),
+ decls.Overload(overloads.AddList,
+ argTypes(listOfA, listOfA), listOfA),
+ decls.Overload(overloads.AddString,
+ argTypes(types.StringType, types.StringType), types.StringType),
+ decls.Overload(overloads.AddUint64,
+ argTypes(types.UintType, types.UintType), types.UintType),
+ decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val {
+ return lhs.(traits.Adder).Add(rhs)
+ }, traits.AdderType)),
+ function(operators.Divide,
+ decls.Overload(overloads.DivideDouble,
+ argTypes(types.DoubleType, types.DoubleType), types.DoubleType),
+ decls.Overload(overloads.DivideInt64,
+ argTypes(types.IntType, types.IntType), types.IntType),
+ decls.Overload(overloads.DivideUint64,
+ argTypes(types.UintType, types.UintType), types.UintType),
+ decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val {
+ return lhs.(traits.Divider).Divide(rhs)
+ }, traits.DividerType)),
+ function(operators.Modulo,
+ decls.Overload(overloads.ModuloInt64,
+ argTypes(types.IntType, types.IntType), types.IntType),
+ decls.Overload(overloads.ModuloUint64,
+ argTypes(types.UintType, types.UintType), types.UintType),
+ decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val {
+ return lhs.(traits.Modder).Modulo(rhs)
+ }, traits.ModderType)),
+ function(operators.Multiply,
+ decls.Overload(overloads.MultiplyDouble,
+ argTypes(types.DoubleType, types.DoubleType), types.DoubleType),
+ decls.Overload(overloads.MultiplyInt64,
+ argTypes(types.IntType, types.IntType), types.IntType),
+ decls.Overload(overloads.MultiplyUint64,
+ argTypes(types.UintType, types.UintType), types.UintType),
+ decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val {
+ return lhs.(traits.Multiplier).Multiply(rhs)
+ }, traits.MultiplierType)),
+ function(operators.Negate,
+ decls.Overload(overloads.NegateDouble, argTypes(types.DoubleType), types.DoubleType),
+ decls.Overload(overloads.NegateInt64, argTypes(types.IntType), types.IntType),
+ decls.SingletonUnaryBinding(func(val ref.Val) ref.Val {
+ if types.IsBool(val) {
+ return types.MaybeNoSuchOverloadErr(val)
+ }
+ return val.(traits.Negater).Negate()
+ }, traits.NegatorType)),
+ function(operators.Subtract,
+ decls.Overload(overloads.SubtractDouble,
+ argTypes(types.DoubleType, types.DoubleType), types.DoubleType),
+ decls.Overload(overloads.SubtractDurationDuration,
+ argTypes(types.DurationType, types.DurationType), types.DurationType),
+ decls.Overload(overloads.SubtractInt64,
+ argTypes(types.IntType, types.IntType), types.IntType),
+ decls.Overload(overloads.SubtractTimestampDuration,
+ argTypes(types.TimestampType, types.DurationType), types.TimestampType),
+ decls.Overload(overloads.SubtractTimestampTimestamp,
+ argTypes(types.TimestampType, types.TimestampType), types.DurationType),
+ decls.Overload(overloads.SubtractUint64,
+ argTypes(types.UintType, types.UintType), types.UintType),
+ decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val {
+ return lhs.(traits.Subtractor).Subtract(rhs)
+ }, traits.SubtractorType)),
+
+ // Relational operators
+
+ function(operators.Less,
+ decls.Overload(overloads.LessBool,
+ argTypes(types.BoolType, types.BoolType), types.BoolType),
+ decls.Overload(overloads.LessInt64,
+ argTypes(types.IntType, types.IntType), types.BoolType),
+ decls.Overload(overloads.LessInt64Double,
+ argTypes(types.IntType, types.DoubleType), types.BoolType),
+ decls.Overload(overloads.LessInt64Uint64,
+ argTypes(types.IntType, types.UintType), types.BoolType),
+ decls.Overload(overloads.LessUint64,
+ argTypes(types.UintType, types.UintType), types.BoolType),
+ decls.Overload(overloads.LessUint64Double,
+ argTypes(types.UintType, types.DoubleType), types.BoolType),
+ decls.Overload(overloads.LessUint64Int64,
+ argTypes(types.UintType, types.IntType), types.BoolType),
+ decls.Overload(overloads.LessDouble,
+ argTypes(types.DoubleType, types.DoubleType), types.BoolType),
+ decls.Overload(overloads.LessDoubleInt64,
+ argTypes(types.DoubleType, types.IntType), types.BoolType),
+ decls.Overload(overloads.LessDoubleUint64,
+ argTypes(types.DoubleType, types.UintType), types.BoolType),
+ decls.Overload(overloads.LessString,
+ argTypes(types.StringType, types.StringType), types.BoolType),
+ decls.Overload(overloads.LessBytes,
+ argTypes(types.BytesType, types.BytesType), types.BoolType),
+ decls.Overload(overloads.LessTimestamp,
+ argTypes(types.TimestampType, types.TimestampType), types.BoolType),
+ decls.Overload(overloads.LessDuration,
+ argTypes(types.DurationType, types.DurationType), types.BoolType),
+ decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val {
+ cmp := lhs.(traits.Comparer).Compare(rhs)
+ if cmp == types.IntNegOne {
+ return types.True
+ }
+ if cmp == types.IntOne || cmp == types.IntZero {
+ return types.False
+ }
+ return cmp
+ }, traits.ComparerType)),
+
+ function(operators.LessEquals,
+ decls.Overload(overloads.LessEqualsBool,
+ argTypes(types.BoolType, types.BoolType), types.BoolType),
+ decls.Overload(overloads.LessEqualsInt64,
+ argTypes(types.IntType, types.IntType), types.BoolType),
+ decls.Overload(overloads.LessEqualsInt64Double,
+ argTypes(types.IntType, types.DoubleType), types.BoolType),
+ decls.Overload(overloads.LessEqualsInt64Uint64,
+ argTypes(types.IntType, types.UintType), types.BoolType),
+ decls.Overload(overloads.LessEqualsUint64,
+ argTypes(types.UintType, types.UintType), types.BoolType),
+ decls.Overload(overloads.LessEqualsUint64Double,
+ argTypes(types.UintType, types.DoubleType), types.BoolType),
+ decls.Overload(overloads.LessEqualsUint64Int64,
+ argTypes(types.UintType, types.IntType), types.BoolType),
+ decls.Overload(overloads.LessEqualsDouble,
+ argTypes(types.DoubleType, types.DoubleType), types.BoolType),
+ decls.Overload(overloads.LessEqualsDoubleInt64,
+ argTypes(types.DoubleType, types.IntType), types.BoolType),
+ decls.Overload(overloads.LessEqualsDoubleUint64,
+ argTypes(types.DoubleType, types.UintType), types.BoolType),
+ decls.Overload(overloads.LessEqualsString,
+ argTypes(types.StringType, types.StringType), types.BoolType),
+ decls.Overload(overloads.LessEqualsBytes,
+ argTypes(types.BytesType, types.BytesType), types.BoolType),
+ decls.Overload(overloads.LessEqualsTimestamp,
+ argTypes(types.TimestampType, types.TimestampType), types.BoolType),
+ decls.Overload(overloads.LessEqualsDuration,
+ argTypes(types.DurationType, types.DurationType), types.BoolType),
+ decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val {
+ cmp := lhs.(traits.Comparer).Compare(rhs)
+ if cmp == types.IntNegOne || cmp == types.IntZero {
+ return types.True
+ }
+ if cmp == types.IntOne {
+ return types.False
+ }
+ return cmp
+ }, traits.ComparerType)),
+
+ function(operators.Greater,
+ decls.Overload(overloads.GreaterBool,
+ argTypes(types.BoolType, types.BoolType), types.BoolType),
+ decls.Overload(overloads.GreaterInt64,
+ argTypes(types.IntType, types.IntType), types.BoolType),
+ decls.Overload(overloads.GreaterInt64Double,
+ argTypes(types.IntType, types.DoubleType), types.BoolType),
+ decls.Overload(overloads.GreaterInt64Uint64,
+ argTypes(types.IntType, types.UintType), types.BoolType),
+ decls.Overload(overloads.GreaterUint64,
+ argTypes(types.UintType, types.UintType), types.BoolType),
+ decls.Overload(overloads.GreaterUint64Double,
+ argTypes(types.UintType, types.DoubleType), types.BoolType),
+ decls.Overload(overloads.GreaterUint64Int64,
+ argTypes(types.UintType, types.IntType), types.BoolType),
+ decls.Overload(overloads.GreaterDouble,
+ argTypes(types.DoubleType, types.DoubleType), types.BoolType),
+ decls.Overload(overloads.GreaterDoubleInt64,
+ argTypes(types.DoubleType, types.IntType), types.BoolType),
+ decls.Overload(overloads.GreaterDoubleUint64,
+ argTypes(types.DoubleType, types.UintType), types.BoolType),
+ decls.Overload(overloads.GreaterString,
+ argTypes(types.StringType, types.StringType), types.BoolType),
+ decls.Overload(overloads.GreaterBytes,
+ argTypes(types.BytesType, types.BytesType), types.BoolType),
+ decls.Overload(overloads.GreaterTimestamp,
+ argTypes(types.TimestampType, types.TimestampType), types.BoolType),
+ decls.Overload(overloads.GreaterDuration,
+ argTypes(types.DurationType, types.DurationType), types.BoolType),
+ decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val {
+ cmp := lhs.(traits.Comparer).Compare(rhs)
+ if cmp == types.IntOne {
+ return types.True
+ }
+ if cmp == types.IntNegOne || cmp == types.IntZero {
+ return types.False
+ }
+ return cmp
+ }, traits.ComparerType)),
+
+ function(operators.GreaterEquals,
+ decls.Overload(overloads.GreaterEqualsBool,
+ argTypes(types.BoolType, types.BoolType), types.BoolType),
+ decls.Overload(overloads.GreaterEqualsInt64,
+ argTypes(types.IntType, types.IntType), types.BoolType),
+ decls.Overload(overloads.GreaterEqualsInt64Double,
+ argTypes(types.IntType, types.DoubleType), types.BoolType),
+ decls.Overload(overloads.GreaterEqualsInt64Uint64,
+ argTypes(types.IntType, types.UintType), types.BoolType),
+ decls.Overload(overloads.GreaterEqualsUint64,
+ argTypes(types.UintType, types.UintType), types.BoolType),
+ decls.Overload(overloads.GreaterEqualsUint64Double,
+ argTypes(types.UintType, types.DoubleType), types.BoolType),
+ decls.Overload(overloads.GreaterEqualsUint64Int64,
+ argTypes(types.UintType, types.IntType), types.BoolType),
+ decls.Overload(overloads.GreaterEqualsDouble,
+ argTypes(types.DoubleType, types.DoubleType), types.BoolType),
+ decls.Overload(overloads.GreaterEqualsDoubleInt64,
+ argTypes(types.DoubleType, types.IntType), types.BoolType),
+ decls.Overload(overloads.GreaterEqualsDoubleUint64,
+ argTypes(types.DoubleType, types.UintType), types.BoolType),
+ decls.Overload(overloads.GreaterEqualsString,
+ argTypes(types.StringType, types.StringType), types.BoolType),
+ decls.Overload(overloads.GreaterEqualsBytes,
+ argTypes(types.BytesType, types.BytesType), types.BoolType),
+ decls.Overload(overloads.GreaterEqualsTimestamp,
+ argTypes(types.TimestampType, types.TimestampType), types.BoolType),
+ decls.Overload(overloads.GreaterEqualsDuration,
+ argTypes(types.DurationType, types.DurationType), types.BoolType),
+ decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val {
+ cmp := lhs.(traits.Comparer).Compare(rhs)
+ if cmp == types.IntOne || cmp == types.IntZero {
+ return types.True
+ }
+ if cmp == types.IntNegOne {
+ return types.False
+ }
+ return cmp
+ }, traits.ComparerType)),
+
+ // Indexing
+ function(operators.Index,
+ decls.Overload(overloads.IndexList, argTypes(listOfA, types.IntType), paramA),
+ decls.Overload(overloads.IndexMap, argTypes(mapOfAB, paramA), paramB),
+ decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val {
+ return lhs.(traits.Indexer).Get(rhs)
+ }, traits.IndexerType)),
+
+ // Collections operators
+ function(operators.In,
+ decls.Overload(overloads.InList, argTypes(paramA, listOfA), types.BoolType),
+ decls.Overload(overloads.InMap, argTypes(paramA, mapOfAB), types.BoolType),
+ decls.SingletonBinaryBinding(inAggregate)),
+ function(operators.OldIn,
+ decls.DisableDeclaration(true), // safe deprecation
+ decls.Overload(overloads.InList, argTypes(paramA, listOfA), types.BoolType),
+ decls.Overload(overloads.InMap, argTypes(paramA, mapOfAB), types.BoolType),
+ decls.SingletonBinaryBinding(inAggregate)),
+ function(overloads.DeprecatedIn,
+ decls.DisableDeclaration(true), // safe deprecation
+ decls.Overload(overloads.InList, argTypes(paramA, listOfA), types.BoolType),
+ decls.Overload(overloads.InMap, argTypes(paramA, mapOfAB), types.BoolType),
+ decls.SingletonBinaryBinding(inAggregate)),
+ function(overloads.Size,
+ decls.Overload(overloads.SizeBytes, argTypes(types.BytesType), types.IntType),
+ decls.MemberOverload(overloads.SizeBytesInst, argTypes(types.BytesType), types.IntType),
+ decls.Overload(overloads.SizeList, argTypes(listOfA), types.IntType),
+ decls.MemberOverload(overloads.SizeListInst, argTypes(listOfA), types.IntType),
+ decls.Overload(overloads.SizeMap, argTypes(mapOfAB), types.IntType),
+ decls.MemberOverload(overloads.SizeMapInst, argTypes(mapOfAB), types.IntType),
+ decls.Overload(overloads.SizeString, argTypes(types.StringType), types.IntType),
+ decls.MemberOverload(overloads.SizeStringInst, argTypes(types.StringType), types.IntType),
+ decls.SingletonUnaryBinding(func(val ref.Val) ref.Val {
+ return val.(traits.Sizer).Size()
+ }, traits.SizerType)),
+
+ // Type conversions
+ function(overloads.TypeConvertType,
+ decls.Overload(overloads.TypeConvertType, argTypes(paramA), types.NewTypeTypeWithParam(paramA)),
+ decls.SingletonUnaryBinding(convertToType(types.TypeType))),
+
+ // Bool conversions
+ function(overloads.TypeConvertBool,
+ decls.Overload(overloads.BoolToBool, argTypes(types.BoolType), types.BoolType,
+ decls.UnaryBinding(identity)),
+ decls.Overload(overloads.StringToBool, argTypes(types.StringType), types.BoolType,
+ decls.UnaryBinding(convertToType(types.BoolType)))),
+
+ // Bytes conversions
+ function(overloads.TypeConvertBytes,
+ decls.Overload(overloads.BytesToBytes, argTypes(types.BytesType), types.BytesType,
+ decls.UnaryBinding(identity)),
+ decls.Overload(overloads.StringToBytes, argTypes(types.StringType), types.BytesType,
+ decls.UnaryBinding(convertToType(types.BytesType)))),
+
+ // Double conversions
+ function(overloads.TypeConvertDouble,
+ decls.Overload(overloads.DoubleToDouble, argTypes(types.DoubleType), types.DoubleType,
+ decls.UnaryBinding(identity)),
+ decls.Overload(overloads.IntToDouble, argTypes(types.IntType), types.DoubleType,
+ decls.UnaryBinding(convertToType(types.DoubleType))),
+ decls.Overload(overloads.StringToDouble, argTypes(types.StringType), types.DoubleType,
+ decls.UnaryBinding(convertToType(types.DoubleType))),
+ decls.Overload(overloads.UintToDouble, argTypes(types.UintType), types.DoubleType,
+ decls.UnaryBinding(convertToType(types.DoubleType)))),
+
+ // Duration conversions
+ function(overloads.TypeConvertDuration,
+ decls.Overload(overloads.DurationToDuration, argTypes(types.DurationType), types.DurationType,
+ decls.UnaryBinding(identity)),
+ decls.Overload(overloads.IntToDuration, argTypes(types.IntType), types.DurationType,
+ decls.UnaryBinding(convertToType(types.DurationType))),
+ decls.Overload(overloads.StringToDuration, argTypes(types.StringType), types.DurationType,
+ decls.UnaryBinding(convertToType(types.DurationType)))),
+
+ // Dyn conversions
+ function(overloads.TypeConvertDyn,
+ decls.Overload(overloads.ToDyn, argTypes(paramA), types.DynType),
+ decls.SingletonUnaryBinding(identity)),
+
+ // Int conversions
+ function(overloads.TypeConvertInt,
+ decls.Overload(overloads.IntToInt, argTypes(types.IntType), types.IntType,
+ decls.UnaryBinding(identity)),
+ decls.Overload(overloads.DoubleToInt, argTypes(types.DoubleType), types.IntType,
+ decls.UnaryBinding(convertToType(types.IntType))),
+ decls.Overload(overloads.DurationToInt, argTypes(types.DurationType), types.IntType,
+ decls.UnaryBinding(convertToType(types.IntType))),
+ decls.Overload(overloads.StringToInt, argTypes(types.StringType), types.IntType,
+ decls.UnaryBinding(convertToType(types.IntType))),
+ decls.Overload(overloads.TimestampToInt, argTypes(types.TimestampType), types.IntType,
+ decls.UnaryBinding(convertToType(types.IntType))),
+ decls.Overload(overloads.UintToInt, argTypes(types.UintType), types.IntType,
+ decls.UnaryBinding(convertToType(types.IntType))),
+ ),
+
+ // String conversions
+ function(overloads.TypeConvertString,
+ decls.Overload(overloads.StringToString, argTypes(types.StringType), types.StringType,
+ decls.UnaryBinding(identity)),
+ decls.Overload(overloads.BoolToString, argTypes(types.BoolType), types.StringType,
+ decls.UnaryBinding(convertToType(types.StringType))),
+ decls.Overload(overloads.BytesToString, argTypes(types.BytesType), types.StringType,
+ decls.UnaryBinding(convertToType(types.StringType))),
+ decls.Overload(overloads.DoubleToString, argTypes(types.DoubleType), types.StringType,
+ decls.UnaryBinding(convertToType(types.StringType))),
+ decls.Overload(overloads.DurationToString, argTypes(types.DurationType), types.StringType,
+ decls.UnaryBinding(convertToType(types.StringType))),
+ decls.Overload(overloads.IntToString, argTypes(types.IntType), types.StringType,
+ decls.UnaryBinding(convertToType(types.StringType))),
+ decls.Overload(overloads.TimestampToString, argTypes(types.TimestampType), types.StringType,
+ decls.UnaryBinding(convertToType(types.StringType))),
+ decls.Overload(overloads.UintToString, argTypes(types.UintType), types.StringType,
+ decls.UnaryBinding(convertToType(types.StringType)))),
+
+ // Timestamp conversions
+ function(overloads.TypeConvertTimestamp,
+ decls.Overload(overloads.TimestampToTimestamp, argTypes(types.TimestampType), types.TimestampType,
+ decls.UnaryBinding(identity)),
+ decls.Overload(overloads.IntToTimestamp, argTypes(types.IntType), types.TimestampType,
+ decls.UnaryBinding(convertToType(types.TimestampType))),
+ decls.Overload(overloads.StringToTimestamp, argTypes(types.StringType), types.TimestampType,
+ decls.UnaryBinding(convertToType(types.TimestampType)))),
+
+ // Uint conversions
+ function(overloads.TypeConvertUint,
+ decls.Overload(overloads.UintToUint, argTypes(types.UintType), types.UintType,
+ decls.UnaryBinding(identity)),
+ decls.Overload(overloads.DoubleToUint, argTypes(types.DoubleType), types.UintType,
+ decls.UnaryBinding(convertToType(types.UintType))),
+ decls.Overload(overloads.IntToUint, argTypes(types.IntType), types.UintType,
+ decls.UnaryBinding(convertToType(types.UintType))),
+ decls.Overload(overloads.StringToUint, argTypes(types.StringType), types.UintType,
+ decls.UnaryBinding(convertToType(types.UintType)))),
+
+ // String functions
+ function(overloads.Contains,
+ decls.MemberOverload(overloads.ContainsString,
+ argTypes(types.StringType, types.StringType), types.BoolType,
+ decls.BinaryBinding(types.StringContains)),
+ decls.DisableTypeGuards(true)),
+ function(overloads.EndsWith,
+ decls.MemberOverload(overloads.EndsWithString,
+ argTypes(types.StringType, types.StringType), types.BoolType,
+ decls.BinaryBinding(types.StringEndsWith)),
+ decls.DisableTypeGuards(true)),
+ function(overloads.StartsWith,
+ decls.MemberOverload(overloads.StartsWithString,
+ argTypes(types.StringType, types.StringType), types.BoolType,
+ decls.BinaryBinding(types.StringStartsWith)),
+ decls.DisableTypeGuards(true)),
+ function(overloads.Matches,
+ decls.Overload(overloads.Matches, argTypes(types.StringType, types.StringType), types.BoolType),
+ decls.MemberOverload(overloads.MatchesString,
+ argTypes(types.StringType, types.StringType), types.BoolType),
+ decls.SingletonBinaryBinding(func(str, pat ref.Val) ref.Val {
+ return str.(traits.Matcher).Match(pat)
+ }, traits.MatcherType)),
+
+ // Timestamp / duration functions
+ function(overloads.TimeGetFullYear,
+ decls.MemberOverload(overloads.TimestampToYear,
+ argTypes(types.TimestampType), types.IntType),
+ decls.MemberOverload(overloads.TimestampToYearWithTz,
+ argTypes(types.TimestampType, types.StringType), types.IntType)),
+
+ function(overloads.TimeGetMonth,
+ decls.MemberOverload(overloads.TimestampToMonth,
+ argTypes(types.TimestampType), types.IntType),
+ decls.MemberOverload(overloads.TimestampToMonthWithTz,
+ argTypes(types.TimestampType, types.StringType), types.IntType)),
+
+ function(overloads.TimeGetDayOfYear,
+ decls.MemberOverload(overloads.TimestampToDayOfYear,
+ argTypes(types.TimestampType), types.IntType),
+ decls.MemberOverload(overloads.TimestampToDayOfYearWithTz,
+ argTypes(types.TimestampType, types.StringType), types.IntType)),
+
+ function(overloads.TimeGetDayOfMonth,
+ decls.MemberOverload(overloads.TimestampToDayOfMonthZeroBased,
+ argTypes(types.TimestampType), types.IntType),
+ decls.MemberOverload(overloads.TimestampToDayOfMonthZeroBasedWithTz,
+ argTypes(types.TimestampType, types.StringType), types.IntType)),
+
+ function(overloads.TimeGetDate,
+ decls.MemberOverload(overloads.TimestampToDayOfMonthOneBased,
+ argTypes(types.TimestampType), types.IntType),
+ decls.MemberOverload(overloads.TimestampToDayOfMonthOneBasedWithTz,
+ argTypes(types.TimestampType, types.StringType), types.IntType)),
+
+ function(overloads.TimeGetDayOfWeek,
+ decls.MemberOverload(overloads.TimestampToDayOfWeek,
+ argTypes(types.TimestampType), types.IntType),
+ decls.MemberOverload(overloads.TimestampToDayOfWeekWithTz,
+ argTypes(types.TimestampType, types.StringType), types.IntType)),
+
+ function(overloads.TimeGetHours,
+ decls.MemberOverload(overloads.TimestampToHours,
+ argTypes(types.TimestampType), types.IntType),
+ decls.MemberOverload(overloads.TimestampToHoursWithTz,
+ argTypes(types.TimestampType, types.StringType), types.IntType),
+ decls.MemberOverload(overloads.DurationToHours,
+ argTypes(types.DurationType), types.IntType)),
+
+ function(overloads.TimeGetMinutes,
+ decls.MemberOverload(overloads.TimestampToMinutes,
+ argTypes(types.TimestampType), types.IntType),
+ decls.MemberOverload(overloads.TimestampToMinutesWithTz,
+ argTypes(types.TimestampType, types.StringType), types.IntType),
+ decls.MemberOverload(overloads.DurationToMinutes,
+ argTypes(types.DurationType), types.IntType)),
+
+ function(overloads.TimeGetSeconds,
+ decls.MemberOverload(overloads.TimestampToSeconds,
+ argTypes(types.TimestampType), types.IntType),
+ decls.MemberOverload(overloads.TimestampToSecondsWithTz,
+ argTypes(types.TimestampType, types.StringType), types.IntType),
+ decls.MemberOverload(overloads.DurationToSeconds,
+ argTypes(types.DurationType), types.IntType)),
+
+ function(overloads.TimeGetMilliseconds,
+ decls.MemberOverload(overloads.TimestampToMilliseconds,
+ argTypes(types.TimestampType), types.IntType),
+ decls.MemberOverload(overloads.TimestampToMillisecondsWithTz,
+ argTypes(types.TimestampType, types.StringType), types.IntType),
+ decls.MemberOverload(overloads.DurationToMilliseconds,
+ argTypes(types.DurationType), types.IntType)),
+ }
+}
+
+// Functions returns the set of standard library function declarations and definitions for CEL.
+func Functions() []*decls.FunctionDecl {
+ return stdFunctions
+}
+
+// Types returns the set of standard library types for CEL.
+func Types() []*decls.VariableDecl {
+ return stdTypes
+}
+
+func notStrictlyFalse(value ref.Val) ref.Val {
+ if types.IsBool(value) {
+ return value
+ }
+ return types.True
+}
+
+func inAggregate(lhs ref.Val, rhs ref.Val) ref.Val {
+ if rhs.Type().HasTrait(traits.ContainerType) {
+ return rhs.(traits.Container).Contains(lhs)
+ }
+ return types.ValOrErr(rhs, "no such overload")
+}
+
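+// function creates a new FunctionDecl from the supplied options and panics on
+// error; the standard library is assembled once at package init time, so a
+// malformed declaration is a programming error rather than a recoverable
+// runtime condition.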
+func function(name string, opts ...decls.FunctionOpt) *decls.FunctionDecl {
+ fn, err := decls.NewFunction(name, opts...)
+ if err != nil {
+ panic(err)
+ }
+ return fn
+}
+
+func argTypes(args ...*types.Type) []*types.Type {
+ return args
+}
+
+func noBinaryOverrides(rhs, lhs ref.Val) ref.Val {
+ return types.NoSuchOverloadErr()
+}
+
+func noFunctionOverrides(args ...ref.Val) ref.Val {
+ return types.NoSuchOverloadErr()
+}
+
+func identity(val ref.Val) ref.Val {
+ return val
+}
+
+func convertToType(t ref.Type) functions.UnaryOp {
+ return func(val ref.Val) ref.Val {
+ return val.ConvertToType(t)
+ }
+}
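+
+// As an illustration, convertToType(types.IntType)(types.String("42")) is
+// expected to produce types.Int(42), while an unparsable string yields an
+// error value from the underlying ConvertToType call.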
diff --git a/vendor/github.com/google/cel-go/common/types/BUILD.bazel b/vendor/github.com/google/cel-go/common/types/BUILD.bazel
new file mode 100644
index 000000000..8f010fae4
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/BUILD.bazel
@@ -0,0 +1,92 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(
+ default_visibility = ["//visibility:public"],
+ licenses = ["notice"], # Apache 2.0
+)
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "any_value.go",
+ "bool.go",
+ "bytes.go",
+ "compare.go",
+ "double.go",
+ "duration.go",
+ "err.go",
+ "int.go",
+ "iterator.go",
+ "json_value.go",
+ "list.go",
+ "map.go",
+ "null.go",
+ "object.go",
+ "optional.go",
+ "overflow.go",
+ "provider.go",
+ "string.go",
+ "timestamp.go",
+ "types.go",
+ "uint.go",
+ "unknown.go",
+ "util.go",
+ ],
+ importpath = "github.com/google/cel-go/common/types",
+ deps = [
+ "//checker/decls:go_default_library",
+ "//common/overloads:go_default_library",
+ "//common/types/pb:go_default_library",
+ "//common/types/ref:go_default_library",
+ "//common/types/traits:go_default_library",
+ "@com_github_stoewer_go_strcase//:go_default_library",
+ "@dev_cel_expr//:expr",
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
+ "@org_golang_google_protobuf//encoding/protojson:go_default_library",
+ "@org_golang_google_protobuf//proto:go_default_library",
+ "@org_golang_google_protobuf//reflect/protoreflect:go_default_library",
+ "@org_golang_google_protobuf//types/dynamicpb:go_default_library",
+ "@org_golang_google_protobuf//types/known/anypb:go_default_library",
+ "@org_golang_google_protobuf//types/known/durationpb:go_default_library",
+ "@org_golang_google_protobuf//types/known/structpb:go_default_library",
+ "@org_golang_google_protobuf//types/known/timestamppb:go_default_library",
+ "@org_golang_google_protobuf//types/known/wrapperspb:go_default_library",
+ ],
+)
+
+go_test(
+ name = "go_default_test",
+ size = "small",
+ srcs = [
+ "bool_test.go",
+ "bytes_test.go",
+ "double_test.go",
+ "duration_test.go",
+ "int_test.go",
+ "json_list_test.go",
+ "json_struct_test.go",
+ "list_test.go",
+ "map_test.go",
+ "null_test.go",
+ "object_test.go",
+ "optional_test.go",
+ "provider_test.go",
+ "string_test.go",
+ "timestamp_test.go",
+ "types_test.go",
+ "uint_test.go",
+ "unknown_test.go",
+ "util_test.go",
+ ],
+ embed = [":go_default_library"],
+ deps = [
+ "//common/types/ref:go_default_library",
+ "//test:go_default_library",
+ "//test/proto3pb:test_all_types_go_proto",
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
+ "@org_golang_google_protobuf//encoding/protojson:go_default_library",
+ "@org_golang_google_protobuf//types/known/anypb:go_default_library",
+ "@org_golang_google_protobuf//types/known/durationpb:go_default_library",
+ "@org_golang_google_protobuf//types/known/timestamppb:go_default_library",
+ ],
+)
diff --git a/vendor/github.com/google/cel-go/common/types/any_value.go b/vendor/github.com/google/cel-go/common/types/any_value.go
new file mode 100644
index 000000000..cda0f13ac
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/any_value.go
@@ -0,0 +1,24 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "reflect"
+
+ anypb "google.golang.org/protobuf/types/known/anypb"
+)
+
+// anyValueType constant representing the reflected type of google.protobuf.Any.
+var anyValueType = reflect.TypeOf(&anypb.Any{})
diff --git a/vendor/github.com/google/cel-go/common/types/bool.go b/vendor/github.com/google/cel-go/common/types/bool.go
new file mode 100644
index 000000000..565734f3f
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/bool.go
@@ -0,0 +1,141 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+
+ "github.com/google/cel-go/common/types/ref"
+
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
+)
+
+// Bool type that implements ref.Val and supports comparison and negation.
+type Bool bool
+
+var (
+ // boolWrapperType golang reflected type for protobuf bool wrapper type.
+ boolWrapperType = reflect.TypeOf(&wrapperspb.BoolValue{})
+)
+
+// Boolean constants
+const (
+ False = Bool(false)
+ True = Bool(true)
+)
+
+// Compare implements the traits.Comparer interface method.
+func (b Bool) Compare(other ref.Val) ref.Val {
+ otherBool, ok := other.(Bool)
+ if !ok {
+ return ValOrErr(other, "no such overload")
+ }
+ if b == otherBool {
+ return IntZero
+ }
+ if !b && otherBool {
+ return IntNegOne
+ }
+ return IntOne
+}
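+
+// Under this ordering False sorts before True: False.Compare(True) returns
+// IntNegOne, True.Compare(True) returns IntZero, and comparing against a
+// non-Bool value yields a no-such-overload error.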
+
+// ConvertToNative implements the ref.Val interface method.
+func (b Bool) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ switch typeDesc.Kind() {
+ case reflect.Bool:
+ return reflect.ValueOf(b).Convert(typeDesc).Interface(), nil
+ case reflect.Ptr:
+ switch typeDesc {
+ case anyValueType:
+ // Primitives must be wrapped in a wrapperspb.BoolValue before being packed into an Any.
+ return anypb.New(wrapperspb.Bool(bool(b)))
+ case boolWrapperType:
+ // Convert the bool to a wrapperspb.BoolValue.
+ return wrapperspb.Bool(bool(b)), nil
+ case jsonValueType:
+ // Return the bool as a new structpb.Value.
+ return structpb.NewBoolValue(bool(b)), nil
+ default:
+ if typeDesc.Elem().Kind() == reflect.Bool {
+ p := bool(b)
+ return &p, nil
+ }
+ }
+ case reflect.Interface:
+ bv := b.Value()
+ if reflect.TypeOf(bv).Implements(typeDesc) {
+ return bv, nil
+ }
+ if reflect.TypeOf(b).Implements(typeDesc) {
+ return b, nil
+ }
+ }
+ return nil, fmt.Errorf("type conversion error from bool to '%v'", typeDesc)
+}
+
+// ConvertToType implements the ref.Val interface method.
+func (b Bool) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case StringType:
+ return String(strconv.FormatBool(bool(b)))
+ case BoolType:
+ return b
+ case TypeType:
+ return BoolType
+ }
+ return NewErr("type conversion error from '%v' to '%v'", BoolType, typeVal)
+}
+
+// Equal implements the ref.Val interface method.
+func (b Bool) Equal(other ref.Val) ref.Val {
+ otherBool, ok := other.(Bool)
+ return Bool(ok && b == otherBool)
+}
+
+// IsZeroValue returns true if the boolean value is false.
+func (b Bool) IsZeroValue() bool {
+ return b == False
+}
+
+// Negate implements the traits.Negater interface method.
+func (b Bool) Negate() ref.Val {
+ return !b
+}
+
+// Type implements the ref.Val interface method.
+func (b Bool) Type() ref.Type {
+ return BoolType
+}
+
+// Value implements the ref.Val interface method.
+func (b Bool) Value() any {
+ return bool(b)
+}
+
+// IsBool returns whether the input ref.Val or ref.Type is equal to BoolType.
+func IsBool(elem ref.Val) bool {
+ switch v := elem.(type) {
+ case Bool:
+ return true
+ case ref.Val:
+ return v.Type() == BoolType
+ default:
+ return false
+ }
+}
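+
+// For example, IsBool(True) reports true, while IsBool(IntZero) reports false
+// since Int's Type() is IntType rather than BoolType.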
diff --git a/vendor/github.com/google/cel-go/common/types/bytes.go b/vendor/github.com/google/cel-go/common/types/bytes.go
new file mode 100644
index 000000000..7e813e291
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/bytes.go
@@ -0,0 +1,140 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "bytes"
+ "encoding/base64"
+ "fmt"
+ "reflect"
+ "unicode/utf8"
+
+ "github.com/google/cel-go/common/types/ref"
+
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
+)
+
+// Bytes type that implements ref.Val and supports add, compare, and size
+// operations.
+type Bytes []byte
+
+var (
+ // byteWrapperType golang reflected type for protobuf bytes wrapper type.
+ byteWrapperType = reflect.TypeOf(&wrapperspb.BytesValue{})
+)
+
+// Add implements traits.Adder interface method by concatenating byte sequences.
+func (b Bytes) Add(other ref.Val) ref.Val {
+ otherBytes, ok := other.(Bytes)
+ if !ok {
+ return ValOrErr(other, "no such overload")
+ }
+ return append(b, otherBytes...)
+}
+
+// Compare implements traits.Comparer interface method by lexicographic ordering.
+func (b Bytes) Compare(other ref.Val) ref.Val {
+ otherBytes, ok := other.(Bytes)
+ if !ok {
+ return ValOrErr(other, "no such overload")
+ }
+ return Int(bytes.Compare(b, otherBytes))
+}
+
+// ConvertToNative implements the ref.Val interface method.
+func (b Bytes) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ switch typeDesc.Kind() {
+ case reflect.Array:
+ if len(b) != typeDesc.Len() {
+ return nil, fmt.Errorf("[%d]byte not assignable to [%d]byte array", len(b), typeDesc.Len())
+ }
+ refArrPtr := reflect.New(reflect.ArrayOf(len(b), typeDesc.Elem()))
+ refArr := refArrPtr.Elem()
+ for i, byt := range b {
+ refArr.Index(i).Set(reflect.ValueOf(byt).Convert(typeDesc.Elem()))
+ }
+ return refArr.Interface(), nil
+ case reflect.Slice:
+ return reflect.ValueOf(b).Convert(typeDesc).Interface(), nil
+ case reflect.Ptr:
+ switch typeDesc {
+ case anyValueType:
+ // Primitives must be wrapped before being set on an Any field.
+ return anypb.New(wrapperspb.Bytes([]byte(b)))
+ case byteWrapperType:
+ // Convert the bytes to a wrapperspb.BytesValue.
+ return wrapperspb.Bytes([]byte(b)), nil
+ case jsonValueType:
+ // CEL follows the proto3 to JSON conversion by encoding bytes to a string via base64.
+ // The encoding below matches the golang 'encoding/json' behavior during marshaling,
+ // which uses base64.StdEncoding.
+ str := base64.StdEncoding.EncodeToString([]byte(b))
+ return structpb.NewStringValue(str), nil
+ }
+ case reflect.Interface:
+ bv := b.Value()
+ if reflect.TypeOf(bv).Implements(typeDesc) {
+ return bv, nil
+ }
+ if reflect.TypeOf(b).Implements(typeDesc) {
+ return b, nil
+ }
+ }
+ return nil, fmt.Errorf("type conversion error from Bytes to '%v'", typeDesc)
+}
+
+// ConvertToType implements the ref.Val interface method.
+func (b Bytes) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case StringType:
+ if !utf8.Valid(b) {
+ return NewErr("invalid UTF-8 in bytes, cannot convert to string")
+ }
+ return String(b)
+ case BytesType:
+ return b
+ case TypeType:
+ return BytesType
+ }
+ return NewErr("type conversion error from '%s' to '%s'", BytesType, typeVal)
+}
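+
+// For example, Bytes("abc").ConvertToType(StringType) returns String("abc"),
+// while bytes containing invalid UTF-8, such as Bytes{0xff}, convert to an
+// error value instead of a string.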
+
+// Equal implements the ref.Val interface method.
+func (b Bytes) Equal(other ref.Val) ref.Val {
+ otherBytes, ok := other.(Bytes)
+ return Bool(ok && bytes.Equal(b, otherBytes))
+}
+
+// IsZeroValue returns true if the byte array is empty.
+func (b Bytes) IsZeroValue() bool {
+ return len(b) == 0
+}
+
+// Size implements the traits.Sizer interface method.
+func (b Bytes) Size() ref.Val {
+ return Int(len(b))
+}
+
+// Type implements the ref.Val interface method.
+func (b Bytes) Type() ref.Type {
+ return BytesType
+}
+
+// Value implements the ref.Val interface method.
+func (b Bytes) Value() any {
+ return []byte(b)
+}
diff --git a/vendor/github.com/google/cel-go/common/types/compare.go b/vendor/github.com/google/cel-go/common/types/compare.go
new file mode 100644
index 000000000..e19682618
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/compare.go
@@ -0,0 +1,97 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "math"
+
+ "github.com/google/cel-go/common/types/ref"
+)
+
+func compareDoubleInt(d Double, i Int) Int {
+ if d < math.MinInt64 {
+ return IntNegOne
+ }
+ if d > math.MaxInt64 {
+ return IntOne
+ }
+ return compareDouble(d, Double(i))
+}
+
+func compareIntDouble(i Int, d Double) Int {
+ return -compareDoubleInt(d, i)
+}
+
+func compareDoubleUint(d Double, u Uint) Int {
+ if d < 0 {
+ return IntNegOne
+ }
+ if d > math.MaxUint64 {
+ return IntOne
+ }
+ return compareDouble(d, Double(u))
+}
+
+func compareUintDouble(u Uint, d Double) Int {
+ return -compareDoubleUint(d, u)
+}
+
+func compareIntUint(i Int, u Uint) Int {
+ if i < 0 || u > math.MaxInt64 {
+ return IntNegOne
+ }
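+	// Both operands now lie within [0, math.MaxInt64], so the subtraction below cannot overflow.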
+ cmp := i - Int(u)
+ if cmp < 0 {
+ return IntNegOne
+ }
+ if cmp > 0 {
+ return IntOne
+ }
+ return IntZero
+}
+
+func compareUintInt(u Uint, i Int) Int {
+ return -compareIntUint(i, u)
+}
+
+func compareDouble(a, b Double) Int {
+ if a < b {
+ return IntNegOne
+ }
+ if a > b {
+ return IntOne
+ }
+ return IntZero
+}
+
+func compareInt(a, b Int) ref.Val {
+ if a < b {
+ return IntNegOne
+ }
+ if a > b {
+ return IntOne
+ }
+ return IntZero
+}
+
+func compareUint(a, b Uint) ref.Val {
+ if a < b {
+ return IntNegOne
+ }
+ if a > b {
+ return IntOne
+ }
+ return IntZero
+}
diff --git a/vendor/github.com/google/cel-go/common/types/doc.go b/vendor/github.com/google/cel-go/common/types/doc.go
new file mode 100644
index 000000000..5f641d704
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/doc.go
@@ -0,0 +1,17 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package types contains the types, traits, and utilities common to all
+// components of expression handling.
+package types
diff --git a/vendor/github.com/google/cel-go/common/types/double.go b/vendor/github.com/google/cel-go/common/types/double.go
new file mode 100644
index 000000000..027e78978
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/double.go
@@ -0,0 +1,211 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "math"
+ "reflect"
+
+ "github.com/google/cel-go/common/types/ref"
+
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
+)
+
+// Double type that implements ref.Val, comparison, and mathematical
+// operations.
+type Double float64
+
+var (
+ // doubleWrapperType reflected type for protobuf double wrapper type.
+ doubleWrapperType = reflect.TypeOf(&wrapperspb.DoubleValue{})
+
+ // floatWrapperType reflected type for protobuf float wrapper type.
+ floatWrapperType = reflect.TypeOf(&wrapperspb.FloatValue{})
+)
+
+// Add implements traits.Adder.Add.
+func (d Double) Add(other ref.Val) ref.Val {
+ otherDouble, ok := other.(Double)
+ if !ok {
+ return MaybeNoSuchOverloadErr(other)
+ }
+ return d + otherDouble
+}
+
+// Compare implements traits.Comparer.Compare.
+func (d Double) Compare(other ref.Val) ref.Val {
+ if math.IsNaN(float64(d)) {
+ return NewErr("NaN values cannot be ordered")
+ }
+ switch ov := other.(type) {
+ case Double:
+ if math.IsNaN(float64(ov)) {
+ return NewErr("NaN values cannot be ordered")
+ }
+ return compareDouble(d, ov)
+ case Int:
+ return compareDoubleInt(d, ov)
+ case Uint:
+ return compareDoubleUint(d, ov)
+ default:
+ return MaybeNoSuchOverloadErr(other)
+ }
+}
+
+// ConvertToNative implements ref.Val.ConvertToNative.
+func (d Double) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ switch typeDesc.Kind() {
+ case reflect.Float32:
+ v := float32(d)
+ return reflect.ValueOf(v).Convert(typeDesc).Interface(), nil
+ case reflect.Float64:
+ v := float64(d)
+ return reflect.ValueOf(v).Convert(typeDesc).Interface(), nil
+ case reflect.Ptr:
+ switch typeDesc {
+ case anyValueType:
+ // Primitives must be wrapped before being set on an Any field.
+ return anypb.New(wrapperspb.Double(float64(d)))
+ case doubleWrapperType:
+ // Convert to a wrapperspb.DoubleValue
+ return wrapperspb.Double(float64(d)), nil
+ case floatWrapperType:
+ // Convert to a wrapperspb.FloatValue (with truncation).
+ return wrapperspb.Float(float32(d)), nil
+ case jsonValueType:
+ // Note, there are special cases for proto3 to json conversion that
+			// expect the floating point value to be converted to NaN,
+			// Infinity, or -Infinity string values, but the jsonpb string
+ // marshaling of the protobuf.Value will handle this conversion.
+ return structpb.NewNumberValue(float64(d)), nil
+ }
+ switch typeDesc.Elem().Kind() {
+ case reflect.Float32:
+ v := float32(d)
+ p := reflect.New(typeDesc.Elem())
+ p.Elem().Set(reflect.ValueOf(v).Convert(typeDesc.Elem()))
+ return p.Interface(), nil
+ case reflect.Float64:
+ v := float64(d)
+ p := reflect.New(typeDesc.Elem())
+ p.Elem().Set(reflect.ValueOf(v).Convert(typeDesc.Elem()))
+ return p.Interface(), nil
+ }
+ case reflect.Interface:
+ dv := d.Value()
+ if reflect.TypeOf(dv).Implements(typeDesc) {
+ return dv, nil
+ }
+ if reflect.TypeOf(d).Implements(typeDesc) {
+ return d, nil
+ }
+ }
+ return nil, fmt.Errorf("type conversion error from Double to '%v'", typeDesc)
+}
+
+// ConvertToType implements ref.Val.ConvertToType.
+func (d Double) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case IntType:
+ i, err := doubleToInt64Checked(float64(d))
+ if err != nil {
+ return WrapErr(err)
+ }
+ return Int(i)
+ case UintType:
+ i, err := doubleToUint64Checked(float64(d))
+ if err != nil {
+ return WrapErr(err)
+ }
+ return Uint(i)
+ case DoubleType:
+ return d
+ case StringType:
+ return String(fmt.Sprintf("%g", float64(d)))
+ case TypeType:
+ return DoubleType
+ }
+ return NewErr("type conversion error from '%s' to '%s'", DoubleType, typeVal)
+}
+
+// Divide implements traits.Divider.Divide.
+func (d Double) Divide(other ref.Val) ref.Val {
+ otherDouble, ok := other.(Double)
+ if !ok {
+ return MaybeNoSuchOverloadErr(other)
+ }
+ return d / otherDouble
+}
+
+// Equal implements ref.Val.Equal.
+func (d Double) Equal(other ref.Val) ref.Val {
+ if math.IsNaN(float64(d)) {
+ return False
+ }
+ switch ov := other.(type) {
+ case Double:
+ if math.IsNaN(float64(ov)) {
+ return False
+ }
+ return Bool(d == ov)
+ case Int:
+ return Bool(compareDoubleInt(d, ov) == 0)
+ case Uint:
+ return Bool(compareDoubleUint(d, ov) == 0)
+ default:
+ return False
+ }
+}
+
+// IsZeroValue returns true if the double value is 0.0
+func (d Double) IsZeroValue() bool {
+ return float64(d) == 0.0
+}
+
+// Multiply implements traits.Multiplier.Multiply.
+func (d Double) Multiply(other ref.Val) ref.Val {
+ otherDouble, ok := other.(Double)
+ if !ok {
+ return MaybeNoSuchOverloadErr(other)
+ }
+ return d * otherDouble
+}
+
+// Negate implements traits.Negater.Negate.
+func (d Double) Negate() ref.Val {
+ return -d
+}
+
+// Subtract implements traits.Subtractor.Subtract.
+func (d Double) Subtract(subtrahend ref.Val) ref.Val {
+ subtraDouble, ok := subtrahend.(Double)
+ if !ok {
+ return MaybeNoSuchOverloadErr(subtrahend)
+ }
+ return d - subtraDouble
+}
+
+// Type implements ref.Val.Type.
+func (d Double) Type() ref.Type {
+ return DoubleType
+}
+
+// Value implements ref.Val.Value.
+func (d Double) Value() any {
+ return float64(d)
+}
diff --git a/vendor/github.com/google/cel-go/common/types/duration.go b/vendor/github.com/google/cel-go/common/types/duration.go
new file mode 100644
index 000000000..596e56d6b
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/duration.go
@@ -0,0 +1,222 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+ "time"
+
+ "github.com/google/cel-go/common/overloads"
+ "github.com/google/cel-go/common/types/ref"
+
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ dpb "google.golang.org/protobuf/types/known/durationpb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+)
+
+// Duration type that implements ref.Val and supports add, compare, negate,
+// and subtract operators. This type is also a receiver which means it can
+// participate in dispatch to receiver functions.
+type Duration struct {
+ time.Duration
+}
+
+func durationOf(d time.Duration) Duration {
+ return Duration{Duration: d}
+}
+
+var (
+ durationValueType = reflect.TypeOf(&dpb.Duration{})
+
+ durationZeroArgOverloads = map[string]func(ref.Val) ref.Val{
+ overloads.TimeGetHours: DurationGetHours,
+ overloads.TimeGetMinutes: DurationGetMinutes,
+ overloads.TimeGetSeconds: DurationGetSeconds,
+ overloads.TimeGetMilliseconds: DurationGetMilliseconds,
+ }
+)
+
+// Add implements traits.Adder.Add.
+func (d Duration) Add(other ref.Val) ref.Val {
+ switch other.Type() {
+ case DurationType:
+ dur2 := other.(Duration)
+ val, err := addDurationChecked(d.Duration, dur2.Duration)
+ if err != nil {
+ return WrapErr(err)
+ }
+ return durationOf(val)
+ case TimestampType:
+ ts := other.(Timestamp).Time
+ val, err := addTimeDurationChecked(ts, d.Duration)
+ if err != nil {
+ return WrapErr(err)
+ }
+ return timestampOf(val)
+ }
+ return MaybeNoSuchOverloadErr(other)
+}
+
+// Compare implements traits.Comparer.Compare.
+func (d Duration) Compare(other ref.Val) ref.Val {
+ otherDur, ok := other.(Duration)
+ if !ok {
+ return MaybeNoSuchOverloadErr(other)
+ }
+ d1 := d.Duration
+ d2 := otherDur.Duration
+ switch {
+ case d1 < d2:
+ return IntNegOne
+ case d1 > d2:
+ return IntOne
+ default:
+ return IntZero
+ }
+}
+
+// ConvertToNative implements ref.Val.ConvertToNative.
+func (d Duration) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ // If the duration is already assignable to the desired type return it.
+ if reflect.TypeOf(d.Duration).AssignableTo(typeDesc) {
+ return d.Duration, nil
+ }
+ if reflect.TypeOf(d).AssignableTo(typeDesc) {
+ return d, nil
+ }
+ switch typeDesc {
+ case anyValueType:
+ // Pack the duration as a dpb.Duration into an Any value.
+ return anypb.New(dpb.New(d.Duration))
+ case durationValueType:
+ // Unwrap the CEL value to its underlying proto value.
+ return dpb.New(d.Duration), nil
+ case jsonValueType:
+ // CEL follows the proto3 to JSON conversion.
+ // Note, using jsonpb would wrap the result in extra double quotes.
+ v := d.ConvertToType(StringType)
+ if IsError(v) {
+ return nil, v.(*Err)
+ }
+ return structpb.NewStringValue(string(v.(String))), nil
+ }
+ return nil, fmt.Errorf("type conversion error from 'Duration' to '%v'", typeDesc)
+}
+
+// ConvertToType implements ref.Val.ConvertToType.
+func (d Duration) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case StringType:
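+		// Proto3 JSON maps a Duration to its seconds value followed by an "s" suffix.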
+ return String(strconv.FormatFloat(d.Seconds(), 'f', -1, 64) + "s")
+ case IntType:
+ return Int(d.Duration)
+ case DurationType:
+ return d
+ case TypeType:
+ return DurationType
+ }
+ return NewErr("type conversion error from '%s' to '%s'", DurationType, typeVal)
+}
+
+// Equal implements ref.Val.Equal.
+func (d Duration) Equal(other ref.Val) ref.Val {
+ otherDur, ok := other.(Duration)
+ return Bool(ok && d.Duration == otherDur.Duration)
+}
+
+// IsZeroValue returns true if the duration value is zero
+func (d Duration) IsZeroValue() bool {
+ return d.Duration == 0
+}
+
+// Negate implements traits.Negater.Negate.
+func (d Duration) Negate() ref.Val {
+ val, err := negateDurationChecked(d.Duration)
+ if err != nil {
+ return WrapErr(err)
+ }
+ return durationOf(val)
+}
+
+// Receive implements traits.Receiver.Receive.
+func (d Duration) Receive(function string, overload string, args []ref.Val) ref.Val {
+ if len(args) == 0 {
+ if f, found := durationZeroArgOverloads[function]; found {
+ return f(d)
+ }
+ }
+ return NoSuchOverloadErr()
+}
+
+// Subtract implements traits.Subtractor.Subtract.
+func (d Duration) Subtract(subtrahend ref.Val) ref.Val {
+ subtraDur, ok := subtrahend.(Duration)
+ if !ok {
+ return MaybeNoSuchOverloadErr(subtrahend)
+ }
+ val, err := subtractDurationChecked(d.Duration, subtraDur.Duration)
+ if err != nil {
+ return WrapErr(err)
+ }
+ return durationOf(val)
+}
+
+// Type implements ref.Val.Type.
+func (d Duration) Type() ref.Type {
+ return DurationType
+}
+
+// Value implements ref.Val.Value.
+func (d Duration) Value() any {
+ return d.Duration
+}
+
+// DurationGetHours returns the duration in hours.
+func DurationGetHours(val ref.Val) ref.Val {
+ dur, ok := val.(Duration)
+ if !ok {
+ return MaybeNoSuchOverloadErr(val)
+ }
+ return Int(dur.Hours())
+}
+
+// DurationGetMinutes returns the duration in minutes.
+func DurationGetMinutes(val ref.Val) ref.Val {
+ dur, ok := val.(Duration)
+ if !ok {
+ return MaybeNoSuchOverloadErr(val)
+ }
+ return Int(dur.Minutes())
+}
+
+// DurationGetSeconds returns the duration in seconds.
+func DurationGetSeconds(val ref.Val) ref.Val {
+ dur, ok := val.(Duration)
+ if !ok {
+ return MaybeNoSuchOverloadErr(val)
+ }
+ return Int(dur.Seconds())
+}
+
+// DurationGetMilliseconds returns the duration in milliseconds.
+func DurationGetMilliseconds(val ref.Val) ref.Val {
+ dur, ok := val.(Duration)
+ if !ok {
+ return MaybeNoSuchOverloadErr(val)
+ }
+ return Int(dur.Milliseconds())
+}
diff --git a/vendor/github.com/google/cel-go/common/types/err.go b/vendor/github.com/google/cel-go/common/types/err.go
new file mode 100644
index 000000000..9c9d9e21e
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/err.go
@@ -0,0 +1,169 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+
+ "github.com/google/cel-go/common/types/ref"
+)
+
+// Error interface which allows types.Err values to be treated as error values.
+type Error interface {
+ error
+ ref.Val
+}
+
+// Err type which extends the built-in go error and implements ref.Val.
+type Err struct {
+ error
+ id int64
+}
+
+var (
+ // ErrType singleton.
+ ErrType = NewOpaqueType("error")
+
+ // errDivideByZero is an error indicating a division by zero of an integer value.
+ errDivideByZero = errors.New("division by zero")
+ // errModulusByZero is an error indicating a modulus by zero of an integer value.
+ errModulusByZero = errors.New("modulus by zero")
+ // errIntOverflow is an error representing integer overflow.
+ errIntOverflow = errors.New("integer overflow")
+ // errUintOverflow is an error representing unsigned integer overflow.
+ errUintOverflow = errors.New("unsigned integer overflow")
+ // errDurationOverflow is an error representing duration overflow.
+ errDurationOverflow = errors.New("duration overflow")
+ // errTimestampOverflow is an error representing timestamp overflow.
+ errTimestampOverflow = errors.New("timestamp overflow")
+ celErrTimestampOverflow = &Err{error: errTimestampOverflow}
+
+ // celErrNoSuchOverload indicates that the call arguments did not match a supported method signature.
+ celErrNoSuchOverload = NewErr("no such overload")
+)
+
+// NewErr creates a new Err described by the format string and args.
+// TODO: Audit the use of this function and standardize the error messages and codes.
+func NewErr(format string, args ...any) ref.Val {
+ return &Err{error: fmt.Errorf(format, args...)}
+}
+
+// NewErrWithNodeID creates a new Err described by the format string and args.
+// TODO: Audit the use of this function and standardize the error messages and codes.
+func NewErrWithNodeID(id int64, format string, args ...any) ref.Val {
+ return &Err{error: fmt.Errorf(format, args...), id: id}
+}
+
+// LabelErrNode returns val unaltered if it is not an Err or if the error has a non-zero
+// AST node ID already present. Otherwise the id is added to the error for
+// recovery with the Err.NodeID method.
+func LabelErrNode(id int64, val ref.Val) ref.Val {
+ if err, ok := val.(*Err); ok && err.id == 0 {
+ err.id = id
+ return err
+ }
+ return val
+}
+
+// NoSuchOverloadErr returns a types.Err instance with a no such overload message.
+func NoSuchOverloadErr() ref.Val {
+ return celErrNoSuchOverload
+}
+
+// UnsupportedRefValConversionErr returns a types.Err instance with a no such conversion
+// message that indicates that the native value could not be converted to a CEL ref.Val.
+func UnsupportedRefValConversionErr(val any) ref.Val {
+ return NewErr("unsupported conversion to ref.Val: (%T)%v", val, val)
+}
+
+// MaybeNoSuchOverloadErr returns the input ref.Val if it is an error or unknown value,
+// otherwise it returns a new "no such overload" error.
+func MaybeNoSuchOverloadErr(val ref.Val) ref.Val {
+ return ValOrErr(val, "no such overload")
+}
+
+// ValOrErr either returns the existing error or creates a new one.
+// TODO: Audit the use of this function and standardize the error messages and codes.
+func ValOrErr(val ref.Val, format string, args ...any) ref.Val {
+ if val == nil || !IsUnknownOrError(val) {
+ return NewErr(format, args...)
+ }
+ return val
+}
+
+// WrapErr wraps an existing Go error value into a CEL Err value.
+func WrapErr(err error) ref.Val {
+ return &Err{error: err}
+}
+
+// ConvertToNative implements ref.Val.ConvertToNative.
+func (e *Err) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ return nil, e.error
+}
+
+// ConvertToType implements ref.Val.ConvertToType.
+func (e *Err) ConvertToType(typeVal ref.Type) ref.Val {
+ // Errors are not convertible to other representations.
+ return e
+}
+
+// Equal implements ref.Val.Equal.
+func (e *Err) Equal(other ref.Val) ref.Val {
+ // An error cannot be equal to any other value, so it returns itself.
+ return e
+}
+
+// String implements fmt.Stringer.
+func (e *Err) String() string {
+ return e.error.Error()
+}
+
+// Type implements ref.Val.Type.
+func (e *Err) Type() ref.Type {
+ return ErrType
+}
+
+// Value implements ref.Val.Value.
+func (e *Err) Value() any {
+ return e.error
+}
+
+// NodeID returns the AST node ID of the expression that returned the error.
+func (e *Err) NodeID() int64 {
+ return e.id
+}
+
+// Is implements errors.Is.
+func (e *Err) Is(target error) bool {
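+	// Errors match when their messages are identical, not only when they are the same instance.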
+ return e.error.Error() == target.Error()
+}
+
+// Unwrap implements errors.Unwrap.
+func (e *Err) Unwrap() error {
+ return e.error
+}
+
+// IsError returns whether the input element ref.Val is an *Err value.
+func IsError(val ref.Val) bool {
+ switch val.(type) {
+ case *Err:
+ return true
+ default:
+ return false
+ }
+}
diff --git a/vendor/github.com/google/cel-go/common/types/int.go b/vendor/github.com/google/cel-go/common/types/int.go
new file mode 100644
index 000000000..0ae9507c3
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/int.go
@@ -0,0 +1,303 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "math"
+ "reflect"
+ "strconv"
+ "time"
+
+ "github.com/google/cel-go/common/types/ref"
+
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
+)
+
+// Int type that implements ref.Val as well as comparison and math operators.
+type Int int64
+
+// Int constants used for comparison results.
+const (
+ // IntZero is the zero-value for Int
+ IntZero = Int(0)
+ IntOne = Int(1)
+ IntNegOne = Int(-1)
+)
+
+var (
+ // int32WrapperType reflected type for protobuf int32 wrapper type.
+ int32WrapperType = reflect.TypeOf(&wrapperspb.Int32Value{})
+
+ // int64WrapperType reflected type for protobuf int64 wrapper type.
+ int64WrapperType = reflect.TypeOf(&wrapperspb.Int64Value{})
+)
+
+// Add implements traits.Adder.Add.
+func (i Int) Add(other ref.Val) ref.Val {
+ otherInt, ok := other.(Int)
+ if !ok {
+ return MaybeNoSuchOverloadErr(other)
+ }
+ val, err := addInt64Checked(int64(i), int64(otherInt))
+ if err != nil {
+ return WrapErr(err)
+ }
+ return Int(val)
+}
+
+// Compare implements traits.Comparer.Compare.
+func (i Int) Compare(other ref.Val) ref.Val {
+ switch ov := other.(type) {
+ case Double:
+ if math.IsNaN(float64(ov)) {
+ return NewErr("NaN values cannot be ordered")
+ }
+ return compareIntDouble(i, ov)
+ case Int:
+ return compareInt(i, ov)
+ case Uint:
+ return compareIntUint(i, ov)
+ default:
+ return MaybeNoSuchOverloadErr(other)
+ }
+}
+
+// ConvertToNative implements ref.Val.ConvertToNative.
+func (i Int) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ switch typeDesc.Kind() {
+ case reflect.Int, reflect.Int32:
+ // Enums are also mapped as int32 derivations.
+ // Note, the code doesn't convert to the enum value directly since this is not known, but
+ // the net effect with respect to proto-assignment is handled correctly by the reflection
+ // Convert method.
+ v, err := int64ToInt32Checked(int64(i))
+ if err != nil {
+ return nil, err
+ }
+ return reflect.ValueOf(v).Convert(typeDesc).Interface(), nil
+ case reflect.Int8:
+ v, err := int64ToInt8Checked(int64(i))
+ if err != nil {
+ return nil, err
+ }
+ return reflect.ValueOf(v).Convert(typeDesc).Interface(), nil
+ case reflect.Int16:
+ v, err := int64ToInt16Checked(int64(i))
+ if err != nil {
+ return nil, err
+ }
+ return reflect.ValueOf(v).Convert(typeDesc).Interface(), nil
+ case reflect.Int64:
+ return reflect.ValueOf(i).Convert(typeDesc).Interface(), nil
+ case reflect.Ptr:
+ switch typeDesc {
+ case anyValueType:
+ // Primitives must be wrapped before being set on an Any field.
+ return anypb.New(wrapperspb.Int64(int64(i)))
+ case int32WrapperType:
+ // Convert the value to a wrapperspb.Int32Value, error on overflow.
+ v, err := int64ToInt32Checked(int64(i))
+ if err != nil {
+ return nil, err
+ }
+ return wrapperspb.Int32(v), nil
+ case int64WrapperType:
+ // Convert the value to a wrapperspb.Int64Value.
+ return wrapperspb.Int64(int64(i)), nil
+ case jsonValueType:
+ // The proto-to-JSON conversion rules would convert all 64-bit integer values to JSON
+ // decimal strings. Because CEL ints might come from the automatic widening of 32-bit
+ // values in protos, the JSON type is chosen dynamically based on the value.
+ //
+ // - Integers -2^53-1 < n < 2^53-1 are encoded as JSON numbers.
+ // - Integers outside this range are encoded as JSON strings.
+ //
+ // The integer to float range represents the largest interval where such a conversion
+ // can round-trip accurately. Thus, conversions from a 32-bit source can expect a JSON
+ // number as with protobuf. Those consuming JSON from a 64-bit source must be able to
+ // handle either a JSON number or a JSON decimal string. To handle these cases safely
+ // the string values must be explicitly converted to int() within a CEL expression;
+ // however, it is best to simply stay within the JSON number range when building JSON
+ // objects in CEL.
+ if i.isJSONSafe() {
+ return structpb.NewNumberValue(float64(i)), nil
+ }
+ // Proto3 to JSON conversion requires string-formatted int64 values
+ // since the conversion to floating point would result in truncation.
+ return structpb.NewStringValue(strconv.FormatInt(int64(i), 10)), nil
+ }
+ switch typeDesc.Elem().Kind() {
+ case reflect.Int32:
+ // Convert the value to a wrapperspb.Int32Value, error on overflow.
+ v, err := int64ToInt32Checked(int64(i))
+ if err != nil {
+ return nil, err
+ }
+ p := reflect.New(typeDesc.Elem())
+ p.Elem().Set(reflect.ValueOf(v).Convert(typeDesc.Elem()))
+ return p.Interface(), nil
+ case reflect.Int64:
+ v := int64(i)
+ p := reflect.New(typeDesc.Elem())
+ p.Elem().Set(reflect.ValueOf(v).Convert(typeDesc.Elem()))
+ return p.Interface(), nil
+ }
+ case reflect.Interface:
+ iv := i.Value()
+ if reflect.TypeOf(iv).Implements(typeDesc) {
+ return iv, nil
+ }
+ if reflect.TypeOf(i).Implements(typeDesc) {
+ return i, nil
+ }
+ }
+ return nil, fmt.Errorf("unsupported type conversion from 'int' to %v", typeDesc)
+}
+
+// ConvertToType implements ref.Val.ConvertToType.
+func (i Int) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case IntType:
+ return i
+ case UintType:
+ u, err := int64ToUint64Checked(int64(i))
+ if err != nil {
+ return WrapErr(err)
+ }
+ return Uint(u)
+ case DoubleType:
+ return Double(i)
+ case StringType:
+ return String(fmt.Sprintf("%d", int64(i)))
+ case TimestampType:
+ // The maximum positive value that can be passed to time.Unix is math.MaxInt64 minus the number
+ // of seconds between year 1 and year 1970. See comments on unixToInternal.
+ if int64(i) < minUnixTime || int64(i) > maxUnixTime {
+ return celErrTimestampOverflow
+ }
+ return timestampOf(time.Unix(int64(i), 0).UTC())
+ case TypeType:
+ return IntType
+ }
+ return NewErr("type conversion error from '%s' to '%s'", IntType, typeVal)
+}
+
+// Divide implements traits.Divider.Divide.
+func (i Int) Divide(other ref.Val) ref.Val {
+ otherInt, ok := other.(Int)
+ if !ok {
+ return MaybeNoSuchOverloadErr(other)
+ }
+ val, err := divideInt64Checked(int64(i), int64(otherInt))
+ if err != nil {
+ return WrapErr(err)
+ }
+ return Int(val)
+}
+
+// Equal implements ref.Val.Equal.
+func (i Int) Equal(other ref.Val) ref.Val {
+ switch ov := other.(type) {
+ case Double:
+ if math.IsNaN(float64(ov)) {
+ return False
+ }
+ return Bool(compareIntDouble(i, ov) == 0)
+ case Int:
+ return Bool(i == ov)
+ case Uint:
+ return Bool(compareIntUint(i, ov) == 0)
+ default:
+ return False
+ }
+}
+
+// IsZeroValue returns true if the integer is equal to 0
+func (i Int) IsZeroValue() bool {
+ return i == IntZero
+}
+
+// Modulo implements traits.Modder.Modulo.
+func (i Int) Modulo(other ref.Val) ref.Val {
+ otherInt, ok := other.(Int)
+ if !ok {
+ return MaybeNoSuchOverloadErr(other)
+ }
+ val, err := moduloInt64Checked(int64(i), int64(otherInt))
+ if err != nil {
+ return WrapErr(err)
+ }
+ return Int(val)
+}
+
+// Multiply implements traits.Multiplier.Multiply.
+func (i Int) Multiply(other ref.Val) ref.Val {
+ otherInt, ok := other.(Int)
+ if !ok {
+ return MaybeNoSuchOverloadErr(other)
+ }
+ val, err := multiplyInt64Checked(int64(i), int64(otherInt))
+ if err != nil {
+ return WrapErr(err)
+ }
+ return Int(val)
+}
+
+// Negate implements traits.Negater.Negate.
+func (i Int) Negate() ref.Val {
+ val, err := negateInt64Checked(int64(i))
+ if err != nil {
+ return WrapErr(err)
+ }
+ return Int(val)
+}
+
+// Subtract implements traits.Subtractor.Subtract.
+func (i Int) Subtract(subtrahend ref.Val) ref.Val {
+ subtraInt, ok := subtrahend.(Int)
+ if !ok {
+ return MaybeNoSuchOverloadErr(subtrahend)
+ }
+ val, err := subtractInt64Checked(int64(i), int64(subtraInt))
+ if err != nil {
+ return WrapErr(err)
+ }
+ return Int(val)
+}
+
+// Type implements ref.Val.Type.
+func (i Int) Type() ref.Type {
+ return IntType
+}
+
+// Value implements ref.Val.Value.
+func (i Int) Value() any {
+ return int64(i)
+}
+
+// isJSONSafe indicates whether the int is safely representable as a floating point value in JSON.
+func (i Int) isJSONSafe() bool {
+ return i >= minIntJSON && i <= maxIntJSON
+}
+
+const (
+ // maxIntJSON is defined as the Number.MAX_SAFE_INTEGER value per EcmaScript 6.
+ maxIntJSON = 1<<53 - 1
+ // minIntJSON is defined as the Number.MIN_SAFE_INTEGER value per EcmaScript 6.
+ minIntJSON = -maxIntJSON
+)
diff --git a/vendor/github.com/google/cel-go/common/types/iterator.go b/vendor/github.com/google/cel-go/common/types/iterator.go
new file mode 100644
index 000000000..98e9147b6
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/iterator.go
@@ -0,0 +1,55 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+)
+
+var (
+ // IteratorType singleton.
+ IteratorType = NewObjectType("iterator", traits.IteratorType)
+)
+
+// baseIterator is the basis for list, map, and object iterators.
+//
+// An iterator in and of itself should not be a valid value for comparison, but must implement the
+// `ref.Val` methods in order to be well-supported within instruction arguments processed by the
+// interpreter.
+type baseIterator struct{}
+
+func (*baseIterator) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ return nil, fmt.Errorf("type conversion on iterators not supported")
+}
+
+func (*baseIterator) ConvertToType(typeVal ref.Type) ref.Val {
+ return NewErr("no such overload")
+}
+
+func (*baseIterator) Equal(other ref.Val) ref.Val {
+ return NewErr("no such overload")
+}
+
+func (*baseIterator) Type() ref.Type {
+ return IteratorType
+}
+
+func (*baseIterator) Value() any {
+ return nil
+}
diff --git a/vendor/github.com/google/cel-go/common/types/json_value.go b/vendor/github.com/google/cel-go/common/types/json_value.go
new file mode 100644
index 000000000..13a4efe7a
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/json_value.go
@@ -0,0 +1,29 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "reflect"
+
+ structpb "google.golang.org/protobuf/types/known/structpb"
+)
+
+// JSON type values representing the reflected types of protobuf JSON values.
+var (
+ jsonValueType = reflect.TypeOf(&structpb.Value{})
+ jsonListValueType = reflect.TypeOf(&structpb.ListValue{})
+ jsonStructType = reflect.TypeOf(&structpb.Struct{})
+ jsonNullType = reflect.TypeOf(structpb.NullValue_NULL_VALUE)
+)
diff --git a/vendor/github.com/google/cel-go/common/types/list.go b/vendor/github.com/google/cel-go/common/types/list.go
new file mode 100644
index 000000000..ca47d39fe
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/list.go
@@ -0,0 +1,574 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+)
+
+// NewDynamicList returns a traits.Lister with heterogeneous elements.
+// value should be an array of "native" types, i.e. any type that
+// NativeToValue() can convert to a ref.Val.
+func NewDynamicList(adapter Adapter, value any) traits.Lister {
+ refValue := reflect.ValueOf(value)
+ return &baseList{
+ Adapter: adapter,
+ value: value,
+ size: refValue.Len(),
+ get: func(i int) any {
+ return refValue.Index(i).Interface()
+ },
+ }
+}
+
+// NewStringList returns a traits.Lister containing only strings.
+func NewStringList(adapter Adapter, elems []string) traits.Lister {
+ return &baseList{
+ Adapter: adapter,
+ value: elems,
+ size: len(elems),
+ get: func(i int) any { return elems[i] },
+ }
+}
+
+// NewRefValList returns a traits.Lister with ref.Val elements.
+//
+// This type specialization is used with list literals within CEL expressions.
+func NewRefValList(adapter Adapter, elems []ref.Val) traits.Lister {
+ return &baseList{
+ Adapter: adapter,
+ value: elems,
+ size: len(elems),
+ get: func(i int) any { return elems[i] },
+ }
+}
+
+// NewProtoList returns a traits.Lister based on a pb.List instance.
+func NewProtoList(adapter Adapter, list protoreflect.List) traits.Lister {
+ return &baseList{
+ Adapter: adapter,
+ value: list,
+ size: list.Len(),
+ get: func(i int) any { return list.Get(i).Interface() },
+ }
+}
+
+// NewJSONList returns a traits.Lister based on structpb.ListValue instance.
+func NewJSONList(adapter Adapter, l *structpb.ListValue) traits.Lister {
+ vals := l.GetValues()
+ return &baseList{
+ Adapter: adapter,
+ value: l,
+ size: len(vals),
+ get: func(i int) any { return vals[i] },
+ }
+}
+
+// NewMutableList creates a new mutable list whose internal state can be modified.
+func NewMutableList(adapter Adapter) traits.MutableLister {
+ var mutableValues []ref.Val
+ l := &mutableList{
+ baseList: &baseList{
+ Adapter: adapter,
+ value: mutableValues,
+ size: 0,
+ },
+ mutableValues: mutableValues,
+ }
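+	// Assign get after construction so the closure reads l.mutableValues through the struct,
+	// keeping lookups in sync with values appended later by Add.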
+ l.get = func(i int) any {
+ return l.mutableValues[i]
+ }
+ return l
+}
+
+// baseList points to a list containing elements of any type.
+// The `value` is the underlying native list, and `get` returns the element at a given index.
+// The `Adapter` enables native type to CEL type conversions.
+type baseList struct {
+ Adapter
+ value any
+
+ // size indicates the number of elements within the list.
+ // Since objects are immutable the size of a list is static.
+ size int
+
+ // get returns a value at the specified integer index.
+ // The index is guaranteed to be checked against the list index range.
+ get func(int) any
+}
+
+// Add implements the traits.Adder interface method.
+func (l *baseList) Add(other ref.Val) ref.Val {
+ otherList, ok := other.(traits.Lister)
+ if !ok {
+ return MaybeNoSuchOverloadErr(other)
+ }
+ if l.Size() == IntZero {
+ return other
+ }
+ if otherList.Size() == IntZero {
+ return l
+ }
+ return &concatList{
+ Adapter: l.Adapter,
+ prevList: l,
+ nextList: otherList}
+}
+
+// Contains implements the traits.Container interface method.
+func (l *baseList) Contains(elem ref.Val) ref.Val {
+ for i := 0; i < l.size; i++ {
+ val := l.NativeToValue(l.get(i))
+ cmp := elem.Equal(val)
+ b, ok := cmp.(Bool)
+ if ok && b == True {
+ return True
+ }
+ }
+ return False
+}
+
+// ConvertToNative implements the ref.Val interface method.
+func (l *baseList) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ // If the underlying list value is assignable to the reflected type return it.
+ if reflect.TypeOf(l.value).AssignableTo(typeDesc) {
+ return l.value, nil
+ }
+ // If the list wrapper is assignable to the desired type return it.
+ if reflect.TypeOf(l).AssignableTo(typeDesc) {
+ return l, nil
+ }
+ // Attempt to convert the list to a set of well known protobuf types.
+ switch typeDesc {
+ case anyValueType:
+ json, err := l.ConvertToNative(jsonListValueType)
+ if err != nil {
+ return nil, err
+ }
+ return anypb.New(json.(proto.Message))
+ case jsonValueType, jsonListValueType:
+ jsonValues, err :=
+ l.ConvertToNative(reflect.TypeOf([]*structpb.Value{}))
+ if err != nil {
+ return nil, err
+ }
+ jsonList := &structpb.ListValue{Values: jsonValues.([]*structpb.Value)}
+ if typeDesc == jsonListValueType {
+ return jsonList, nil
+ }
+ return structpb.NewListValue(jsonList), nil
+ }
+ // Non-list conversion.
+ if typeDesc.Kind() != reflect.Slice && typeDesc.Kind() != reflect.Array {
+ return nil, fmt.Errorf("type conversion error from list to '%v'", typeDesc)
+ }
+
+ // List conversion.
+ // Allow the element ConvertToNative() function to determine whether conversion is possible.
+ otherElemType := typeDesc.Elem()
+ elemCount := l.size
+ var nativeList reflect.Value
+ if typeDesc.Kind() == reflect.Array {
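+		// Obtain an addressable value of the target array type by allocating an outer
+		// array of such arrays and indexing its first element.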
+ nativeList = reflect.New(reflect.ArrayOf(elemCount, typeDesc)).Elem().Index(0)
+ } else {
+ nativeList = reflect.MakeSlice(typeDesc, elemCount, elemCount)
+ }
+ for i := 0; i < elemCount; i++ {
+ elem := l.NativeToValue(l.get(i))
+ nativeElemVal, err := elem.ConvertToNative(otherElemType)
+ if err != nil {
+ return nil, err
+ }
+ nativeList.Index(i).Set(reflect.ValueOf(nativeElemVal))
+ }
+ return nativeList.Interface(), nil
+}
+
+// ConvertToType implements the ref.Val interface method.
+func (l *baseList) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case ListType:
+ return l
+ case TypeType:
+ return ListType
+ }
+ return NewErr("type conversion error from '%s' to '%s'", ListType, typeVal)
+}
+
+// Equal implements the ref.Val interface method.
+func (l *baseList) Equal(other ref.Val) ref.Val {
+ otherList, ok := other.(traits.Lister)
+ if !ok {
+ return False
+ }
+ if l.Size() != otherList.Size() {
+ return False
+ }
+ for i := IntZero; i < l.Size().(Int); i++ {
+ thisElem := l.Get(i)
+ otherElem := otherList.Get(i)
+ elemEq := Equal(thisElem, otherElem)
+ if elemEq == False {
+ return False
+ }
+ }
+ return True
+}
+
+// Get implements the traits.Indexer interface method.
+func (l *baseList) Get(index ref.Val) ref.Val {
+ ind, err := IndexOrError(index)
+ if err != nil {
+ return ValOrErr(index, err.Error())
+ }
+ if ind < 0 || ind >= l.size {
+ return NewErr("index '%d' out of range in list size '%d'", ind, l.Size())
+ }
+ return l.NativeToValue(l.get(ind))
+}
+
+// IsZeroValue returns true if the list is empty.
+func (l *baseList) IsZeroValue() bool {
+ return l.size == 0
+}
+
+// Fold calls the FoldEntry method for each (index, value) pair in the list.
+func (l *baseList) Fold(f traits.Folder) {
+ for i := 0; i < l.size; i++ {
+ if !f.FoldEntry(i, l.get(i)) {
+ break
+ }
+ }
+}
+
+// Iterator implements the traits.Iterable interface method.
+func (l *baseList) Iterator() traits.Iterator {
+ return newListIterator(l)
+}
+
+// Size implements the traits.Sizer interface method.
+func (l *baseList) Size() ref.Val {
+ return Int(l.size)
+}
+
+// Type implements the ref.Val interface method.
+func (l *baseList) Type() ref.Type {
+ return ListType
+}
+
+// Value implements the ref.Val interface method.
+func (l *baseList) Value() any {
+ return l.value
+}
+
+// String converts the list to a human-readable string form.
+func (l *baseList) String() string {
+ var sb strings.Builder
+ sb.WriteString("[")
+ for i := 0; i < l.size; i++ {
+ sb.WriteString(fmt.Sprintf("%v", l.get(i)))
+ if i != l.size-1 {
+ sb.WriteString(", ")
+ }
+ }
+ sb.WriteString("]")
+ return sb.String()
+}
+
+// mutableList aggregates values into its internal storage. For use with internal CEL variables only.
+type mutableList struct {
+ *baseList
+ mutableValues []ref.Val
+}
+
+// Add copies elements from the other list into the internal storage of the mutable list.
+// The ref.Val returned by Add is the receiver.
+func (l *mutableList) Add(other ref.Val) ref.Val {
+ switch otherList := other.(type) {
+ case *mutableList:
+ l.mutableValues = append(l.mutableValues, otherList.mutableValues...)
+ l.size += len(otherList.mutableValues)
+ case traits.Lister:
+ for i := IntZero; i < otherList.Size().(Int); i++ {
+ l.size++
+ l.mutableValues = append(l.mutableValues, otherList.Get(i))
+ }
+ default:
+ return MaybeNoSuchOverloadErr(otherList)
+ }
+ return l
+}
+
+// ToImmutableList returns an immutable list based on the internal storage of the mutable list.
+func (l *mutableList) ToImmutableList() traits.Lister {
+ // The reference to internal state is guaranteed to be safe as this call is only performed
+ // when mutations have been completed.
+ return NewRefValList(l.Adapter, l.mutableValues)
+}
+
+// concatList combines two list implementations together into a view.
+// The `Adapter` enables native type to CEL type conversions.
+type concatList struct {
+ Adapter
+ value any
+ prevList traits.Lister
+ nextList traits.Lister
+}
+
+// Add implements the traits.Adder interface method.
+func (l *concatList) Add(other ref.Val) ref.Val {
+ otherList, ok := other.(traits.Lister)
+ if !ok {
+ return MaybeNoSuchOverloadErr(other)
+ }
+ if l.Size() == IntZero {
+ return other
+ }
+ if otherList.Size() == IntZero {
+ return l
+ }
+ return &concatList{
+ Adapter: l.Adapter,
+ prevList: l,
+ nextList: otherList}
+}
+
+// Contains implements the traits.Container interface method.
+func (l *concatList) Contains(elem ref.Val) ref.Val {
+ // The concat list relies on the IsErrorOrUnknown checks against the input element to be
+ // performed by the `prevList` and/or `nextList`.
+ prev := l.prevList.Contains(elem)
+ // Short-circuit the return if the elem was found in the prev list.
+ if prev == True {
+ return prev
+ }
+ // Return if the elem was found in the next list.
+ next := l.nextList.Contains(elem)
+ if next == True {
+ return next
+ }
+ // Handle the case where an error or unknown was encountered before checking next.
+ if IsUnknownOrError(prev) {
+ return prev
+ }
+ // Otherwise, rely on the next value as the representative result.
+ return next
+}
+
+// ConvertToNative implements the ref.Val interface method.
+func (l *concatList) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ combined := NewDynamicList(l.Adapter, l.Value().([]any))
+ return combined.ConvertToNative(typeDesc)
+}
+
+// ConvertToType implements the ref.Val interface method.
+func (l *concatList) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case ListType:
+ return l
+ case TypeType:
+ return ListType
+ }
+ return NewErr("type conversion error from '%s' to '%s'", ListType, typeVal)
+}
+
+// Equal implements the ref.Val interface method.
+func (l *concatList) Equal(other ref.Val) ref.Val {
+ otherList, ok := other.(traits.Lister)
+ if !ok {
+ return False
+ }
+ if l.Size() != otherList.Size() {
+ return False
+ }
+ var maybeErr ref.Val
+ for i := IntZero; i < l.Size().(Int); i++ {
+ thisElem := l.Get(i)
+ otherElem := otherList.Get(i)
+ elemEq := Equal(thisElem, otherElem)
+ if elemEq == False {
+ return False
+ }
+ if maybeErr == nil && IsUnknownOrError(elemEq) {
+ maybeErr = elemEq
+ }
+ }
+ if maybeErr != nil {
+ return maybeErr
+ }
+ return True
+}
+
+// Get implements the traits.Indexer interface method.
+func (l *concatList) Get(index ref.Val) ref.Val {
+ ind, err := IndexOrError(index)
+ if err != nil {
+ return ValOrErr(index, err.Error())
+ }
+ i := Int(ind)
+ if i < l.prevList.Size().(Int) {
+ return l.prevList.Get(i)
+ }
+ offset := i - l.prevList.Size().(Int)
+ return l.nextList.Get(offset)
+}
+
+// IsZeroValue returns true if the list is empty.
+func (l *concatList) IsZeroValue() bool {
+ return l.Size().(Int) == 0
+}
+
+// Fold calls the FoldEntry method for each (index, value) pair in the list.
+func (l *concatList) Fold(f traits.Folder) {
+ for i := Int(0); i < l.Size().(Int); i++ {
+ if !f.FoldEntry(i, l.Get(i)) {
+ break
+ }
+ }
+}
+
+// Iterator implements the traits.Iterable interface method.
+func (l *concatList) Iterator() traits.Iterator {
+ return newListIterator(l)
+}
+
+// Size implements the traits.Sizer interface method.
+func (l *concatList) Size() ref.Val {
+ return l.prevList.Size().(Int).Add(l.nextList.Size())
+}
+
+// String converts the concatenated list to a human-readable string.
+func (l *concatList) String() string {
+ var sb strings.Builder
+ sb.WriteString("[")
+ for i := Int(0); i < l.Size().(Int); i++ {
+ sb.WriteString(fmt.Sprintf("%v", l.Get(i)))
+ if i != l.Size().(Int)-1 {
+ sb.WriteString(", ")
+ }
+ }
+ sb.WriteString("]")
+ return sb.String()
+}
+
+// Type implements the ref.Val interface method.
+func (l *concatList) Type() ref.Type {
+ return ListType
+}
+
+// Value implements the ref.Val interface method.
+func (l *concatList) Value() any {
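+	// Lazily flatten both halves into a single []any and cache the result for later calls.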
+ if l.value == nil {
+ merged := make([]any, l.Size().(Int))
+ prevLen := l.prevList.Size().(Int)
+ for i := Int(0); i < prevLen; i++ {
+ merged[i] = l.prevList.Get(i).Value()
+ }
+ nextLen := l.nextList.Size().(Int)
+ for j := Int(0); j < nextLen; j++ {
+ merged[prevLen+j] = l.nextList.Get(j).Value()
+ }
+ l.value = merged
+ }
+ return l.value
+}
+
+func newListIterator(listValue traits.Lister) traits.Iterator {
+ return &listIterator{
+ listValue: listValue,
+ len: listValue.Size().(Int),
+ }
+}
+
+type listIterator struct {
+ *baseIterator
+ listValue traits.Lister
+ cursor Int
+ len Int
+}
+
+// HasNext implements the traits.Iterator interface method.
+func (it *listIterator) HasNext() ref.Val {
+ return Bool(it.cursor < it.len)
+}
+
+// Next implements the traits.Iterator interface method.
+func (it *listIterator) Next() ref.Val {
+ if it.HasNext() == True {
+ index := it.cursor
+ it.cursor++
+ return it.listValue.Get(index)
+ }
+ return nil
+}
+
+// IndexOrError converts an input index value into either a lossless integer index or an error.
+func IndexOrError(index ref.Val) (int, error) {
+ switch iv := index.(type) {
+ case Int:
+ return int(iv), nil
+ case Double:
+ if ik, ok := doubleToInt64Lossless(float64(iv)); ok {
+ return int(ik), nil
+ }
+ return -1, fmt.Errorf("unsupported index value %v in list", index)
+ case Uint:
+ if ik, ok := uint64ToInt64Lossless(uint64(iv)); ok {
+ return int(ik), nil
+ }
+ return -1, fmt.Errorf("unsupported index value %v in list", index)
+ default:
+ return -1, fmt.Errorf("unsupported index type '%s' in list", index.Type())
+ }
+}
+
+// ToFoldableList will create a Foldable version of a list suitable for key-value pair iteration.
+//
+// For values which are already Foldable, this call is a no-op. For all other values, the fold is
+// driven via the Size() and Get() calls which means that the folding will function, but take a
+// performance hit.
+func ToFoldableList(l traits.Lister) traits.Foldable {
+ if f, ok := l.(traits.Foldable); ok {
+ return f
+ }
+ return interopFoldableList{Lister: l}
+}
+
+type interopFoldableList struct {
+ traits.Lister
+}
+
+// Fold implements the traits.Foldable interface method and performs an iteration over the
+// range of elements of the list.
+func (l interopFoldableList) Fold(f traits.Folder) {
+ sz := l.Size().(Int)
+ for i := Int(0); i < sz; i++ {
+ if !f.FoldEntry(i, l.Get(i)) {
+ break
+ }
+ }
+}
diff --git a/vendor/github.com/google/cel-go/common/types/map.go b/vendor/github.com/google/cel-go/common/types/map.go
new file mode 100644
index 000000000..cb6cce78b
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/map.go
@@ -0,0 +1,1002 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ "github.com/stoewer/go-strcase"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+
+ "github.com/google/cel-go/common/types/pb"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+)
+
+// NewDynamicMap returns a traits.Mapper value with dynamic key, value pairs.
+func NewDynamicMap(adapter Adapter, value any) traits.Mapper {
+ refValue := reflect.ValueOf(value)
+ return &baseMap{
+ Adapter: adapter,
+ mapAccessor: newReflectMapAccessor(adapter, refValue),
+ value: value,
+ size: refValue.Len(),
+ }
+}
+
+// NewJSONStruct creates a traits.Mapper implementation backed by a JSON struct that has been
+// encoded in protocol buffer form.
+//
+// The `adapter` argument provides type adaptation capabilities from proto to CEL.
+func NewJSONStruct(adapter Adapter, value *structpb.Struct) traits.Mapper {
+ fields := value.GetFields()
+ return &baseMap{
+ Adapter: adapter,
+ mapAccessor: newJSONStructAccessor(adapter, fields),
+ value: value,
+ size: len(fields),
+ }
+}
+
+// NewRefValMap returns a specialized traits.Mapper with CEL valued keys and values.
+func NewRefValMap(adapter Adapter, value map[ref.Val]ref.Val) traits.Mapper {
+ return &baseMap{
+ Adapter: adapter,
+ mapAccessor: newRefValMapAccessor(value),
+ value: value,
+ size: len(value),
+ }
+}
+
+// NewStringInterfaceMap returns a specialized traits.Mapper with string keys and interface values.
+func NewStringInterfaceMap(adapter Adapter, value map[string]any) traits.Mapper {
+ return &baseMap{
+ Adapter: adapter,
+ mapAccessor: newStringIfaceMapAccessor(adapter, value),
+ value: value,
+ size: len(value),
+ }
+}
+
+// NewStringStringMap returns a specialized traits.Mapper with string keys and values.
+func NewStringStringMap(adapter Adapter, value map[string]string) traits.Mapper {
+ return &baseMap{
+ Adapter: adapter,
+ mapAccessor: newStringMapAccessor(value),
+ value: value,
+ size: len(value),
+ }
+}
+
+// NewProtoMap returns a specialized traits.Mapper for handling protobuf map values.
+func NewProtoMap(adapter Adapter, value *pb.Map) traits.Mapper {
+ return &protoMap{
+ Adapter: adapter,
+ value: value,
+ }
+}
+
+// NewMutableMap constructs a mutable map from an adapter and a set of map values.
+func NewMutableMap(adapter Adapter, mutableValues map[ref.Val]ref.Val) traits.MutableMapper {
+ mutableCopy := make(map[ref.Val]ref.Val, len(mutableValues))
+ for k, v := range mutableValues {
+ mutableCopy[k] = v
+ }
+ m := &mutableMap{
+ baseMap: &baseMap{
+ Adapter: adapter,
+ mapAccessor: newRefValMapAccessor(mutableCopy),
+ value: mutableCopy,
+ size: len(mutableCopy),
+ },
+ mutableValues: mutableCopy,
+ }
+ return m
+}
+
+// mapAccessor is a private interface for finding values within a map and iterating over the keys.
+// This interface implements portions of the API surface area required by the traits.Mapper
+// interface.
+type mapAccessor interface {
+ // Find returns a value, if one exists, for the input key.
+ //
+ // If the key is not found the function returns (nil, false).
+ Find(ref.Val) (ref.Val, bool)
+
+ // Iterator returns an Iterator over the map key set.
+ Iterator() traits.Iterator
+
+ // Fold calls the FoldEntry method for each (key, value) pair in the map.
+ Fold(traits.Folder)
+}
+
+// baseMap is a reflection based map implementation designed to handle a variety of map-like types.
+//
+// Since CEL is side-effect free, the base map represents an immutable object.
+type baseMap struct {
+ // TypeAdapter used to convert keys and values accessed within the map.
+ Adapter
+
+ // mapAccessor interface implementation used to find and iterate over map keys.
+ mapAccessor
+
+	// value is the native Go value upon which the map type operates.
+ value any
+
+ // size is the number of entries in the map.
+ size int
+}
+
+// Contains implements the traits.Container interface method.
+func (m *baseMap) Contains(index ref.Val) ref.Val {
+ _, found := m.Find(index)
+ return Bool(found)
+}
+
+// ConvertToNative implements the ref.Val interface method.
+func (m *baseMap) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ // If the map is already assignable to the desired type return it, e.g. interfaces and
+ // maps with the same key value types.
+ if reflect.TypeOf(m.value).AssignableTo(typeDesc) {
+ return m.value, nil
+ }
+ if reflect.TypeOf(m).AssignableTo(typeDesc) {
+ return m, nil
+ }
+ switch typeDesc {
+ case anyValueType:
+ json, err := m.ConvertToNative(jsonStructType)
+ if err != nil {
+ return nil, err
+ }
+ return anypb.New(json.(proto.Message))
+ case jsonValueType, jsonStructType:
+ jsonEntries, err :=
+ m.ConvertToNative(reflect.TypeOf(map[string]*structpb.Value{}))
+ if err != nil {
+ return nil, err
+ }
+ jsonMap := &structpb.Struct{Fields: jsonEntries.(map[string]*structpb.Value)}
+ if typeDesc == jsonStructType {
+ return jsonMap, nil
+ }
+ return structpb.NewStructValue(jsonMap), nil
+ }
+
+ // Unwrap pointers, but track their use.
+ isPtr := false
+ if typeDesc.Kind() == reflect.Ptr {
+ tk := typeDesc
+ typeDesc = typeDesc.Elem()
+ if typeDesc.Kind() == reflect.Ptr {
+ return nil, fmt.Errorf("unsupported type conversion to '%v'", tk)
+ }
+ isPtr = true
+ }
+ switch typeDesc.Kind() {
+ // Map conversion.
+ case reflect.Map:
+ otherKey := typeDesc.Key()
+ otherElem := typeDesc.Elem()
+ nativeMap := reflect.MakeMapWithSize(typeDesc, m.size)
+ it := m.Iterator()
+ for it.HasNext() == True {
+ key := it.Next()
+ refKeyValue, err := key.ConvertToNative(otherKey)
+ if err != nil {
+ return nil, err
+ }
+ refElemValue, err := m.Get(key).ConvertToNative(otherElem)
+ if err != nil {
+ return nil, err
+ }
+ nativeMap.SetMapIndex(reflect.ValueOf(refKeyValue), reflect.ValueOf(refElemValue))
+ }
+ return nativeMap.Interface(), nil
+ case reflect.Struct:
+ nativeStructPtr := reflect.New(typeDesc)
+ nativeStruct := nativeStructPtr.Elem()
+ it := m.Iterator()
+ for it.HasNext() == True {
+ key := it.Next()
+ // Ensure the field name being referenced is exported.
+ // Only exported (public) field names can be set by reflection, where the name
+ // must be at least one character in length and start with an upper-case letter.
+ fieldName := key.ConvertToType(StringType)
+ if IsError(fieldName) {
+ return nil, fieldName.(*Err)
+ }
+ name := string(fieldName.(String))
+ name = strcase.UpperCamelCase(name)
+ fieldRef := nativeStruct.FieldByName(name)
+ if !fieldRef.IsValid() {
+ return nil, fmt.Errorf("type conversion error, no such field '%s' in type '%v'", name, typeDesc)
+ }
+ fieldValue, err := m.Get(key).ConvertToNative(fieldRef.Type())
+ if err != nil {
+ return nil, err
+ }
+ fieldRef.Set(reflect.ValueOf(fieldValue))
+ }
+ if isPtr {
+ return nativeStructPtr.Interface(), nil
+ }
+ return nativeStruct.Interface(), nil
+ }
+ return nil, fmt.Errorf("type conversion error from map to '%v'", typeDesc)
+}
+
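+// Illustrative sketch, not part of the upstream file: the reflect.Struct
+// branch above maps keys onto exported fields via UpperCamelCase, so a
+// snake_case key can populate a hypothetical target struct:
+//
+//	type Person struct{ FirstName string }
+//
+//	m := DefaultTypeAdapter.NativeToValue(map[string]any{"first_name": "Ada"})
+//	v, err := m.ConvertToNative(reflect.TypeOf(Person{}))
+//	// v.(Person).FirstName == "Ada", err == nil
+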
+// ConvertToType implements the ref.Val interface method.
+func (m *baseMap) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case MapType:
+ return m
+ case TypeType:
+ return MapType
+ }
+ return NewErr("type conversion error from '%s' to '%s'", MapType, typeVal)
+}
+
+// Equal implements the ref.Val interface method.
+func (m *baseMap) Equal(other ref.Val) ref.Val {
+ otherMap, ok := other.(traits.Mapper)
+ if !ok {
+ return False
+ }
+ if m.Size() != otherMap.Size() {
+ return False
+ }
+ it := m.Iterator()
+ for it.HasNext() == True {
+ key := it.Next()
+ thisVal, _ := m.Find(key)
+ otherVal, found := otherMap.Find(key)
+ if !found {
+ return False
+ }
+ valEq := Equal(thisVal, otherVal)
+ if valEq == False {
+ return False
+ }
+ }
+ return True
+}
+
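+// Illustrative sketch, not part of the upstream file: equality is a size
+// check plus per-key lookup, so iteration order never matters. The map
+// literals are assumptions for the example:
+//
+//	a := NewRefValMap(DefaultTypeAdapter, map[ref.Val]ref.Val{Int(1): String("a")})
+//	b := NewRefValMap(DefaultTypeAdapter, map[ref.Val]ref.Val{Int(1): String("a")})
+//	a.Equal(b) // True
+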
+// Get implements the traits.Indexer interface method.
+func (m *baseMap) Get(key ref.Val) ref.Val {
+ v, found := m.Find(key)
+ if !found {
+ return ValOrErr(v, "no such key: %v", key)
+ }
+ return v
+}
+
+// IsZeroValue returns true if the map is empty.
+func (m *baseMap) IsZeroValue() bool {
+ return m.size == 0
+}
+
+// Size implements the traits.Sizer interface method.
+func (m *baseMap) Size() ref.Val {
+ return Int(m.size)
+}
+
+// String converts the map into a human-readable string.
+func (m *baseMap) String() string {
+ var sb strings.Builder
+ sb.WriteString("{")
+ it := m.Iterator()
+ i := 0
+ for it.HasNext() == True {
+ k := it.Next()
+ v, _ := m.Find(k)
+ sb.WriteString(fmt.Sprintf("%v: %v", k, v))
+ if i != m.size-1 {
+ sb.WriteString(", ")
+ }
+ i++
+ }
+ sb.WriteString("}")
+ return sb.String()
+}
+
+// Type implements the ref.Val interface method.
+func (m *baseMap) Type() ref.Type {
+ return MapType
+}
+
+// Value implements the ref.Val interface method.
+func (m *baseMap) Value() any {
+ return m.value
+}
+
+// mutableMap holds onto a set of mutable values which are used for intermediate computations.
+type mutableMap struct {
+ *baseMap
+ mutableValues map[ref.Val]ref.Val
+}
+
+// Insert implements the traits.MutableMapper interface method, returning true if the key insertion
+// succeeds.
+func (m *mutableMap) Insert(k, v ref.Val) ref.Val {
+ if _, found := m.Find(k); found {
+ return NewErr("insert failed: key %v already exists", k)
+ }
+ m.mutableValues[k] = v
+ return m
+}
+
+// ToImmutableMap implements the traits.MutableMapper interface method, converting a mutable map
+// into an immutable map implementation.
+func (m *mutableMap) ToImmutableMap() traits.Mapper {
+ return NewRefValMap(m.Adapter, m.mutableValues)
+}
+
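+// Illustrative sketch, not part of the upstream file, assuming the package's
+// NewMutableMap constructor: entries are accumulated during intermediate
+// computation and the result is then frozen for general use:
+//
+//	mm := NewMutableMap(DefaultTypeAdapter, map[ref.Val]ref.Val{})
+//	mm.Insert(String("k"), Int(1)) // returns the map, or an error on duplicates
+//	frozen := mm.ToImmutableMap()  // immutable traits.Mapper snapshot
+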
+func newJSONStructAccessor(adapter Adapter, st map[string]*structpb.Value) mapAccessor {
+ return &jsonStructAccessor{
+ Adapter: adapter,
+ st: st,
+ }
+}
+
+type jsonStructAccessor struct {
+ Adapter
+ st map[string]*structpb.Value
+}
+
+// Find searches the json struct field map for the input key value and returns (value, true) if
+// found.
+//
+// If the key is not found the function returns (nil, false).
+func (a *jsonStructAccessor) Find(key ref.Val) (ref.Val, bool) {
+ strKey, ok := key.(String)
+ if !ok {
+ return nil, false
+ }
+ keyVal, found := a.st[string(strKey)]
+ if !found {
+ return nil, false
+ }
+ return a.NativeToValue(keyVal), true
+}
+
+// Iterator creates a new traits.Iterator from the set of JSON struct field names.
+func (a *jsonStructAccessor) Iterator() traits.Iterator {
+ // Copy the keys to make their order stable.
+ mapKeys := make([]string, len(a.st))
+ i := 0
+ for k := range a.st {
+ mapKeys[i] = k
+ i++
+ }
+ return &stringKeyIterator{
+ mapKeys: mapKeys,
+ len: len(mapKeys),
+ }
+}
+
+// Fold calls the FoldEntry method for each (key, value) pair in the map.
+func (a *jsonStructAccessor) Fold(f traits.Folder) {
+ for k, v := range a.st {
+ if !f.FoldEntry(k, v) {
+ break
+ }
+ }
+}
+
+func newReflectMapAccessor(adapter Adapter, value reflect.Value) mapAccessor {
+ keyType := value.Type().Key()
+ return &reflectMapAccessor{
+ Adapter: adapter,
+ refValue: value,
+ keyType: keyType,
+ }
+}
+
+type reflectMapAccessor struct {
+ Adapter
+ refValue reflect.Value
+ keyType reflect.Type
+}
+
+// Find converts the input key to a native Golang type and then uses reflection to find the key,
+// returning (value, true) if present.
+//
+// If the key is not found the function returns (nil, false).
+func (m *reflectMapAccessor) Find(key ref.Val) (ref.Val, bool) {
+ if m.refValue.Len() == 0 {
+ return nil, false
+ }
+ if keyVal, found := m.findInternal(key); found {
+ return keyVal, true
+ }
+ switch k := key.(type) {
+ // Double is not a valid proto map key type, so check for the key as an int or uint.
+ case Double:
+ if ik, ok := doubleToInt64Lossless(float64(k)); ok {
+ if keyVal, found := m.findInternal(Int(ik)); found {
+ return keyVal, true
+ }
+ }
+ if uk, ok := doubleToUint64Lossless(float64(k)); ok {
+ return m.findInternal(Uint(uk))
+ }
+ // map keys of type double are not supported.
+ case Int:
+ if uk, ok := int64ToUint64Lossless(int64(k)); ok {
+ return m.findInternal(Uint(uk))
+ }
+ case Uint:
+ if ik, ok := uint64ToInt64Lossless(uint64(k)); ok {
+ return m.findInternal(Int(ik))
+ }
+ }
+ return nil, false
+}
+
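+// Illustrative sketch, not part of the upstream file: the fallback above lets
+// numerically equal keys of different CEL types match. With an assumed native
+// map[int64]string value:
+//
+//	m := DefaultTypeAdapter.NativeToValue(map[int64]string{2: "two"}).(traits.Mapper)
+//	m.Find(Double(2.0)) // (String("two"), true): 2.0 converts losslessly to Int(2)
+//	m.Find(Double(2.5)) // (nil, false): no lossless integer representation
+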
+// findInternal attempts to convert the incoming key to the map's internal native type
+// and then returns the value, if found.
+func (m *reflectMapAccessor) findInternal(key ref.Val) (ref.Val, bool) {
+ k, err := key.ConvertToNative(m.keyType)
+ if err != nil {
+ return nil, false
+ }
+ refKey := reflect.ValueOf(k)
+ val := m.refValue.MapIndex(refKey)
+ if val.IsValid() {
+ return m.NativeToValue(val.Interface()), true
+ }
+ return nil, false
+}
+
+// Iterator creates a Golang reflection-based traits.Iterator.
+func (m *reflectMapAccessor) Iterator() traits.Iterator {
+ return &mapIterator{
+ Adapter: m.Adapter,
+ mapKeys: m.refValue.MapRange(),
+ len: m.refValue.Len(),
+ }
+}
+
+// Fold calls the FoldEntry method for each (key, value) pair in the map.
+func (m *reflectMapAccessor) Fold(f traits.Folder) {
+ mapRange := m.refValue.MapRange()
+ for mapRange.Next() {
+ if !f.FoldEntry(mapRange.Key().Interface(), mapRange.Value().Interface()) {
+ break
+ }
+ }
+}
+
+func newRefValMapAccessor(mapVal map[ref.Val]ref.Val) mapAccessor {
+ return &refValMapAccessor{mapVal: mapVal}
+}
+
+type refValMapAccessor struct {
+ mapVal map[ref.Val]ref.Val
+}
+
+// Find uses native map accesses to find the key, returning (value, true) if present.
+//
+// If the key is not found the function returns (nil, false).
+func (a *refValMapAccessor) Find(key ref.Val) (ref.Val, bool) {
+ if len(a.mapVal) == 0 {
+ return nil, false
+ }
+ if keyVal, found := a.mapVal[key]; found {
+ return keyVal, true
+ }
+ switch k := key.(type) {
+ case Double:
+ if ik, ok := doubleToInt64Lossless(float64(k)); ok {
+ if keyVal, found := a.mapVal[Int(ik)]; found {
+ return keyVal, found
+ }
+ }
+ if uk, ok := doubleToUint64Lossless(float64(k)); ok {
+ keyVal, found := a.mapVal[Uint(uk)]
+ return keyVal, found
+ }
+ // map keys of type double are not supported.
+ case Int:
+ if uk, ok := int64ToUint64Lossless(int64(k)); ok {
+ keyVal, found := a.mapVal[Uint(uk)]
+ return keyVal, found
+ }
+ case Uint:
+ if ik, ok := uint64ToInt64Lossless(uint64(k)); ok {
+ keyVal, found := a.mapVal[Int(ik)]
+ return keyVal, found
+ }
+ }
+ return nil, false
+}
+
+// Iterator produces a new traits.Iterator which iterates over the map keys via Golang reflection.
+func (a *refValMapAccessor) Iterator() traits.Iterator {
+ return &mapIterator{
+ Adapter: DefaultTypeAdapter,
+ mapKeys: reflect.ValueOf(a.mapVal).MapRange(),
+ len: len(a.mapVal),
+ }
+}
+
+// Fold calls the FoldEntry method for each (key, value) pair in the map.
+func (a *refValMapAccessor) Fold(f traits.Folder) {
+ for k, v := range a.mapVal {
+ if !f.FoldEntry(k, v) {
+ break
+ }
+ }
+}
+
+func newStringMapAccessor(strMap map[string]string) mapAccessor {
+ return &stringMapAccessor{mapVal: strMap}
+}
+
+type stringMapAccessor struct {
+ mapVal map[string]string
+}
+
+// Find uses native map accesses to find the key, returning (value, true) if present.
+//
+// If the key is not found the function returns (nil, false).
+func (a *stringMapAccessor) Find(key ref.Val) (ref.Val, bool) {
+ strKey, ok := key.(String)
+ if !ok {
+ return nil, false
+ }
+ keyVal, found := a.mapVal[string(strKey)]
+ if !found {
+ return nil, false
+ }
+ return String(keyVal), true
+}
+
+// Iterator creates a new traits.Iterator from the string key set of the map.
+func (a *stringMapAccessor) Iterator() traits.Iterator {
+ // Copy the keys to make their order stable.
+ mapKeys := make([]string, len(a.mapVal))
+ i := 0
+ for k := range a.mapVal {
+ mapKeys[i] = k
+ i++
+ }
+ return &stringKeyIterator{
+ mapKeys: mapKeys,
+ len: len(mapKeys),
+ }
+}
+
+// Fold calls the FoldEntry method for each (key, value) pair in the map.
+func (a *stringMapAccessor) Fold(f traits.Folder) {
+ for k, v := range a.mapVal {
+ if !f.FoldEntry(k, v) {
+ break
+ }
+ }
+}
+
+func newStringIfaceMapAccessor(adapter Adapter, mapVal map[string]any) mapAccessor {
+ return &stringIfaceMapAccessor{
+ Adapter: adapter,
+ mapVal: mapVal,
+ }
+}
+
+type stringIfaceMapAccessor struct {
+ Adapter
+ mapVal map[string]any
+}
+
+// Find uses native map accesses to find the key, returning (value, true) if present.
+//
+// If the key is not found the function returns (nil, false).
+func (a *stringIfaceMapAccessor) Find(key ref.Val) (ref.Val, bool) {
+ strKey, ok := key.(String)
+ if !ok {
+ return nil, false
+ }
+ keyVal, found := a.mapVal[string(strKey)]
+ if !found {
+ return nil, false
+ }
+ return a.NativeToValue(keyVal), true
+}
+
+// Iterator creates a new traits.Iterator from the string key set of the map.
+func (a *stringIfaceMapAccessor) Iterator() traits.Iterator {
+ // Copy the keys to make their order stable.
+ mapKeys := make([]string, len(a.mapVal))
+ i := 0
+ for k := range a.mapVal {
+ mapKeys[i] = k
+ i++
+ }
+ return &stringKeyIterator{
+ mapKeys: mapKeys,
+ len: len(mapKeys),
+ }
+}
+
+// Fold calls the FoldEntry method for each (key, value) pair in the map.
+func (a *stringIfaceMapAccessor) Fold(f traits.Folder) {
+ for k, v := range a.mapVal {
+ if !f.FoldEntry(k, v) {
+ break
+ }
+ }
+}
+
+// protoMap is a specialized, separate implementation of the traits.Mapper interface tailored to
+// accessing protoreflect.Map values.
+type protoMap struct {
+ Adapter
+ value *pb.Map
+}
+
+// Contains returns whether the map contains the given key.
+func (m *protoMap) Contains(key ref.Val) ref.Val {
+ _, found := m.Find(key)
+ return Bool(found)
+}
+
+// ConvertToNative implements the ref.Val interface method.
+//
+// Note, assignment to Golang struct types is not yet supported.
+func (m *protoMap) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ // If the map is already assignable to the desired type return it, e.g. interfaces and
+ // maps with the same key value types.
+ switch typeDesc {
+ case anyValueType:
+ json, err := m.ConvertToNative(jsonStructType)
+ if err != nil {
+ return nil, err
+ }
+ return anypb.New(json.(proto.Message))
+ case jsonValueType, jsonStructType:
+ jsonEntries, err :=
+ m.ConvertToNative(reflect.TypeOf(map[string]*structpb.Value{}))
+ if err != nil {
+ return nil, err
+ }
+ jsonMap := &structpb.Struct{
+ Fields: jsonEntries.(map[string]*structpb.Value)}
+ if typeDesc == jsonStructType {
+ return jsonMap, nil
+ }
+ return structpb.NewStructValue(jsonMap), nil
+ }
+ switch typeDesc.Kind() {
+ case reflect.Struct, reflect.Ptr:
+ if reflect.TypeOf(m.value).AssignableTo(typeDesc) {
+ return m.value, nil
+ }
+ if reflect.TypeOf(m).AssignableTo(typeDesc) {
+ return m, nil
+ }
+ }
+ if typeDesc.Kind() != reflect.Map {
+ return nil, fmt.Errorf("unsupported type conversion: %v to map", typeDesc)
+ }
+
+ keyType := m.value.KeyType.ReflectType()
+ valType := m.value.ValueType.ReflectType()
+ otherKeyType := typeDesc.Key()
+ otherValType := typeDesc.Elem()
+ mapVal := reflect.MakeMapWithSize(typeDesc, m.value.Len())
+ var err error
+ m.value.Range(func(key protoreflect.MapKey, val protoreflect.Value) bool {
+ ntvKey := key.Interface()
+ ntvVal := val.Interface()
+ switch pv := ntvVal.(type) {
+ case protoreflect.Message:
+ ntvVal = pv.Interface()
+ }
+ if keyType == otherKeyType && valType == otherValType {
+ mapVal.SetMapIndex(reflect.ValueOf(ntvKey), reflect.ValueOf(ntvVal))
+ return true
+ }
+ celKey := m.NativeToValue(ntvKey)
+ celVal := m.NativeToValue(ntvVal)
+ ntvKey, err = celKey.ConvertToNative(otherKeyType)
+ if err != nil {
+ // early terminate the range loop.
+ return false
+ }
+ ntvVal, err = celVal.ConvertToNative(otherValType)
+ if err != nil {
+ // early terminate the range loop.
+ return false
+ }
+ mapVal.SetMapIndex(reflect.ValueOf(ntvKey), reflect.ValueOf(ntvVal))
+ return true
+ })
+ if err != nil {
+ return nil, err
+ }
+ return mapVal.Interface(), nil
+}
+
+// ConvertToType implements the ref.Val interface method.
+func (m *protoMap) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case MapType:
+ return m
+ case TypeType:
+ return MapType
+ }
+ return NewErr("type conversion error from '%s' to '%s'", MapType, typeVal)
+}
+
+// Equal implements the ref.Val interface method.
+func (m *protoMap) Equal(other ref.Val) ref.Val {
+ otherMap, ok := other.(traits.Mapper)
+ if !ok {
+ return False
+ }
+ if m.value.Map.Len() != int(otherMap.Size().(Int)) {
+ return False
+ }
+ var retVal ref.Val = True
+ m.value.Range(func(key protoreflect.MapKey, val protoreflect.Value) bool {
+ keyVal := m.NativeToValue(key.Interface())
+ valVal := m.NativeToValue(val)
+ otherVal, found := otherMap.Find(keyVal)
+ if !found {
+ retVal = False
+ return false
+ }
+ valEq := Equal(valVal, otherVal)
+ if valEq != True {
+ retVal = valEq
+ return false
+ }
+ return true
+ })
+ return retVal
+}
+
+// Find searches the protoreflect.Map for the input key, returning (value, true) if present.
+//
+// If the key is not found the function returns (nil, false).
+func (m *protoMap) Find(key ref.Val) (ref.Val, bool) {
+ if keyVal, found := m.findInternal(key); found {
+ return keyVal, true
+ }
+ switch k := key.(type) {
+ // Double is not a valid proto map key type, so check for the key as an int or uint.
+ case Double:
+ if ik, ok := doubleToInt64Lossless(float64(k)); ok {
+ if keyVal, found := m.findInternal(Int(ik)); found {
+ return keyVal, true
+ }
+ }
+ if uk, ok := doubleToUint64Lossless(float64(k)); ok {
+ return m.findInternal(Uint(uk))
+ }
+ // map keys of type double are not supported.
+ case Int:
+ if uk, ok := int64ToUint64Lossless(int64(k)); ok {
+ return m.findInternal(Uint(uk))
+ }
+ case Uint:
+ if ik, ok := uint64ToInt64Lossless(uint64(k)); ok {
+ return m.findInternal(Int(ik))
+ }
+ }
+ return nil, false
+}
+
+// findInternal attempts to convert the incoming key to the map's internal native type
+// and then returns the value, if found.
+func (m *protoMap) findInternal(key ref.Val) (ref.Val, bool) {
+ // Convert the input key to the expected protobuf key type.
+ ntvKey, err := key.ConvertToNative(m.value.KeyType.ReflectType())
+ if err != nil {
+ return nil, false
+ }
+ // Use protoreflection to get the key value.
+ val := m.value.Get(protoreflect.ValueOf(ntvKey).MapKey())
+ if !val.IsValid() {
+ return nil, false
+ }
+ // Perform nominal type unwrapping from the input value.
+ switch v := val.Interface().(type) {
+ case protoreflect.List, protoreflect.Map:
+		// Maps do not support list or map values.
+ return nil, false
+ default:
+ return m.NativeToValue(v), true
+ }
+}
+
+// Get implements the traits.Indexer interface method.
+func (m *protoMap) Get(key ref.Val) ref.Val {
+ v, found := m.Find(key)
+ if !found {
+ return ValOrErr(v, "no such key: %v", key)
+ }
+ return v
+}
+
+// IsZeroValue returns true if the map is empty.
+func (m *protoMap) IsZeroValue() bool {
+ return m.value.Len() == 0
+}
+
+// Iterator implements the traits.Iterable interface method.
+func (m *protoMap) Iterator() traits.Iterator {
+ // Copy the keys to make their order stable.
+ mapKeys := make([]protoreflect.MapKey, 0, m.value.Len())
+ m.value.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
+ mapKeys = append(mapKeys, k)
+ return true
+ })
+ return &protoMapIterator{
+ Adapter: m.Adapter,
+ mapKeys: mapKeys,
+ len: m.value.Len(),
+ }
+}
+
+// Fold calls the FoldEntry method for each (key, value) pair in the map.
+func (m *protoMap) Fold(f traits.Folder) {
+ m.value.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
+ return f.FoldEntry(k.Interface(), v.Interface())
+ })
+}
+
+// Size returns the number of entries in the protoreflect.Map.
+func (m *protoMap) Size() ref.Val {
+ return Int(m.value.Len())
+}
+
+// Type implements the ref.Val interface method.
+func (m *protoMap) Type() ref.Type {
+ return MapType
+}
+
+// Value implements the ref.Val interface method.
+func (m *protoMap) Value() any {
+ return m.value
+}
+
+type mapIterator struct {
+ *baseIterator
+ Adapter
+ mapKeys *reflect.MapIter
+ cursor int
+ len int
+}
+
+// HasNext implements the traits.Iterator interface method.
+func (it *mapIterator) HasNext() ref.Val {
+ return Bool(it.cursor < it.len)
+}
+
+// Next implements the traits.Iterator interface method.
+func (it *mapIterator) Next() ref.Val {
+ if it.HasNext() == True && it.mapKeys.Next() {
+ it.cursor++
+ refKey := it.mapKeys.Key()
+ return it.NativeToValue(refKey.Interface())
+ }
+ return nil
+}
+
+type protoMapIterator struct {
+ *baseIterator
+ Adapter
+ mapKeys []protoreflect.MapKey
+ cursor int
+ len int
+}
+
+// HasNext implements the traits.Iterator interface method.
+func (it *protoMapIterator) HasNext() ref.Val {
+ return Bool(it.cursor < it.len)
+}
+
+// Next implements the traits.Iterator interface method.
+func (it *protoMapIterator) Next() ref.Val {
+ if it.HasNext() == True {
+ index := it.cursor
+ it.cursor++
+ refKey := it.mapKeys[index]
+ return it.NativeToValue(refKey.Interface())
+ }
+ return nil
+}
+
+type stringKeyIterator struct {
+ *baseIterator
+ mapKeys []string
+ cursor int
+ len int
+}
+
+// HasNext implements the traits.Iterator interface method.
+func (it *stringKeyIterator) HasNext() ref.Val {
+ return Bool(it.cursor < it.len)
+}
+
+// Next implements the traits.Iterator interface method.
+func (it *stringKeyIterator) Next() ref.Val {
+ if it.HasNext() == True {
+ index := it.cursor
+ it.cursor++
+ return String(it.mapKeys[index])
+ }
+ return nil
+}
+
+// ToFoldableMap will create a Foldable version of a map suitable for key-value pair iteration.
+//
+// For values which are already Foldable, this call is a no-op. For all other values, the fold
+// is driven via the Iterator HasNext() and Next() calls as well as the map's Get() method,
+// which means that the folding will still function, but at a performance cost.
+func ToFoldableMap(m traits.Mapper) traits.Foldable {
+ if f, ok := m.(traits.Foldable); ok {
+ return f
+ }
+ return interopFoldableMap{Mapper: m}
+}
+
+type interopFoldableMap struct {
+ traits.Mapper
+}
+
+func (m interopFoldableMap) Fold(f traits.Folder) {
+ it := m.Iterator()
+ for it.HasNext() == True {
+ k := it.Next()
+ if !f.FoldEntry(k, m.Get(k)) {
+ break
+ }
+ }
+}
+
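+// Illustrative sketch, not part of the upstream file: any type with a
+// FoldEntry(key, val any) bool method can drive the fold; the counter type
+// below is a hypothetical example:
+//
+//	type counter struct{ n int }
+//
+//	func (c *counter) FoldEntry(k, v any) bool { c.n++; return true }
+//
+//	c := &counter{}
+//	ToFoldableMap(m).Fold(c) // c.n == entry count; returning false stops early
+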
+// InsertMapKeyValue inserts a key, value pair into the target map if the target map does not
+// already contain the given key.
+//
+// If the map is mutable, it is modified in-place per the MutableMapper contract.
+// If the map is not mutable, a copy containing the new key, value pair is made.
+func InsertMapKeyValue(m traits.Mapper, k, v ref.Val) ref.Val {
+ if mutable, ok := m.(traits.MutableMapper); ok {
+ return mutable.Insert(k, v)
+ }
+
+ // Otherwise perform the slow version of the insertion which makes a copy of the incoming map.
+ if _, found := m.Find(k); !found {
+ size := m.Size().(Int)
+ copy := make(map[ref.Val]ref.Val, size+1)
+ copy[k] = v
+ it := m.Iterator()
+ for it.HasNext() == True {
+ nextK := it.Next()
+ nextV := m.Get(nextK)
+ copy[nextK] = nextV
+ }
+ return DefaultTypeAdapter.NativeToValue(copy)
+ }
+ return NewErr("insert failed: key %v already exists", k)
+}
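+
+// Illustrative sketch, not part of the upstream file: inserting into an
+// immutable map copies the entries, and duplicate keys yield an error value
+// rather than a panic. The map literal is an assumption for the example:
+//
+//	m := NewRefValMap(DefaultTypeAdapter, map[ref.Val]ref.Val{String("a"): Int(1)})
+//	m2 := InsertMapKeyValue(m, String("b"), Int(2)) // new two-entry map
+//	bad := InsertMapKeyValue(m, String("a"), Int(3))
+//	IsError(bad) // true: key "a" already exists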
diff --git a/vendor/github.com/google/cel-go/common/types/null.go b/vendor/github.com/google/cel-go/common/types/null.go
new file mode 100644
index 000000000..36514ff20
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/null.go
@@ -0,0 +1,119 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "reflect"
+
+ "google.golang.org/protobuf/proto"
+
+ "github.com/google/cel-go/common/types/ref"
+
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+)
+
+// Null type implementation.
+type Null structpb.NullValue
+
+var (
+ // NullValue singleton.
+ NullValue = Null(structpb.NullValue_NULL_VALUE)
+
+ // golang reflect type for Null values.
+ nullReflectType = reflect.TypeOf(NullValue)
+
+ protoIfaceType = reflect.TypeOf((*proto.Message)(nil)).Elem()
+)
+
+// ConvertToNative implements ref.Val.ConvertToNative.
+func (n Null) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ switch typeDesc.Kind() {
+ case reflect.Int32:
+ switch typeDesc {
+ case jsonNullType:
+ return structpb.NullValue_NULL_VALUE, nil
+ case nullReflectType:
+ return n, nil
+ }
+ case reflect.Ptr:
+ switch typeDesc {
+ case anyValueType:
+ // Convert to a JSON-null before packing to an Any field since the enum value for JSON
+ // null cannot be packed directly.
+ pb, err := n.ConvertToNative(jsonValueType)
+ if err != nil {
+ return nil, err
+ }
+ return anypb.New(pb.(proto.Message))
+ case jsonValueType:
+ return structpb.NewNullValue(), nil
+ case boolWrapperType, byteWrapperType, doubleWrapperType, floatWrapperType,
+ int32WrapperType, int64WrapperType, stringWrapperType, uint32WrapperType,
+ uint64WrapperType, durationValueType, timestampValueType, protoIfaceType:
+ return nil, nil
+ case jsonListValueType, jsonStructType:
+ // skip handling
+ default:
+ if typeDesc.Implements(protoIfaceType) {
+ return nil, nil
+ }
+ }
+ case reflect.Interface:
+ nv := n.Value()
+ if reflect.TypeOf(nv).Implements(typeDesc) {
+ return nv, nil
+ }
+ if reflect.TypeOf(n).Implements(typeDesc) {
+ return n, nil
+ }
+ }
+ // If the type conversion isn't supported return an error.
+ return nil, fmt.Errorf("type conversion error from '%v' to '%v'", NullType, typeDesc)
+}
+
+// ConvertToType implements ref.Val.ConvertToType.
+func (n Null) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case StringType:
+ return String("null")
+ case NullType:
+ return n
+ case TypeType:
+ return NullType
+ }
+ return NewErr("type conversion error from '%s' to '%s'", NullType, typeVal)
+}
+
+// Equal implements ref.Val.Equal.
+func (n Null) Equal(other ref.Val) ref.Val {
+ return Bool(NullType == other.Type())
+}
+
+// IsZeroValue returns true as null always represents an absent value.
+func (n Null) IsZeroValue() bool {
+ return true
+}
+
+// Type implements ref.Val.Type.
+func (n Null) Type() ref.Type {
+ return NullType
+}
+
+// Value implements ref.Val.Value.
+func (n Null) Value() any {
+ return structpb.NullValue_NULL_VALUE
+}
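+
+// Illustrative sketch, not part of the upstream file: Null equality is purely
+// type-based, and only string, null, and type conversions are supported:
+//
+//	NullValue.Equal(NullValue)          // True
+//	NullValue.ConvertToType(StringType) // String("null")
+//	NullValue.ConvertToType(IntType)    // error value: unsupported conversion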
diff --git a/vendor/github.com/google/cel-go/common/types/object.go b/vendor/github.com/google/cel-go/common/types/object.go
new file mode 100644
index 000000000..8ba0af9fb
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/object.go
@@ -0,0 +1,165 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "reflect"
+
+ "google.golang.org/protobuf/encoding/protojson"
+ "google.golang.org/protobuf/proto"
+
+ "github.com/google/cel-go/common/types/pb"
+ "github.com/google/cel-go/common/types/ref"
+
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+)
+
+type protoObj struct {
+ Adapter
+ value proto.Message
+ typeDesc *pb.TypeDescription
+ typeValue ref.Val
+}
+
+// NewObject returns an object based on a proto.Message value which handles
+// conversion between protobuf type values and expression type values.
+// Objects support indexing and iteration.
+//
+// Note: the type value is pulled from the list of registered types within the
+// type provider. If the proto type is not registered within the type provider,
+// then this will result in an error within the type adapter / provider.
+func NewObject(adapter Adapter,
+ typeDesc *pb.TypeDescription,
+ typeValue ref.Val,
+ value proto.Message) ref.Val {
+ return &protoObj{
+ Adapter: adapter,
+ value: value,
+ typeDesc: typeDesc,
+ typeValue: typeValue}
+}
+
+func (o *protoObj) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ srcPB := o.value
+ if reflect.TypeOf(srcPB).AssignableTo(typeDesc) {
+ return srcPB, nil
+ }
+ if reflect.TypeOf(o).AssignableTo(typeDesc) {
+ return o, nil
+ }
+ switch typeDesc {
+ case anyValueType:
+ _, isAny := srcPB.(*anypb.Any)
+ if isAny {
+ return srcPB, nil
+ }
+ return anypb.New(srcPB)
+ case jsonValueType:
+ // Marshal the proto to JSON first, and then rehydrate as protobuf.Value as there is no
+ // support for direct conversion from proto.Message to protobuf.Value.
+ bytes, err := protojson.Marshal(srcPB)
+ if err != nil {
+ return nil, err
+ }
+ json := &structpb.Value{}
+ err = protojson.Unmarshal(bytes, json)
+ if err != nil {
+ return nil, err
+ }
+ return json, nil
+ default:
+ if typeDesc == o.typeDesc.ReflectType() {
+ return o.value, nil
+ }
+ if typeDesc.Kind() == reflect.Ptr {
+ val := reflect.New(typeDesc.Elem()).Interface()
+ dstPB, ok := val.(proto.Message)
+ if ok {
+ err := pb.Merge(dstPB, srcPB)
+ if err != nil {
+ return nil, fmt.Errorf("type conversion error: %v", err)
+ }
+ return dstPB, nil
+ }
+ }
+ }
+ return nil, fmt.Errorf("type conversion error from '%T' to '%v'", o.value, typeDesc)
+}
+
+func (o *protoObj) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ default:
+ if o.Type().TypeName() == typeVal.TypeName() {
+ return o
+ }
+ case TypeType:
+ return o.typeValue
+ }
+ return NewErr("type conversion error from '%s' to '%s'", o.typeDesc.Name(), typeVal)
+}
+
+func (o *protoObj) Equal(other ref.Val) ref.Val {
+ otherPB, ok := other.Value().(proto.Message)
+ return Bool(ok && pb.Equal(o.value, otherPB))
+}
+
+// IsSet tests whether a field which is defined is set to a non-default value.
+func (o *protoObj) IsSet(field ref.Val) ref.Val {
+ protoFieldName, ok := field.(String)
+ if !ok {
+ return MaybeNoSuchOverloadErr(field)
+ }
+ protoFieldStr := string(protoFieldName)
+ fd, found := o.typeDesc.FieldByName(protoFieldStr)
+ if !found {
+ return NewErr("no such field '%s'", field)
+ }
+ if fd.IsSet(o.value) {
+ return True
+ }
+ return False
+}
+
+// IsZeroValue returns true if the protobuf object is empty.
+func (o *protoObj) IsZeroValue() bool {
+ return proto.Equal(o.value, o.typeDesc.Zero())
+}
+
+func (o *protoObj) Get(index ref.Val) ref.Val {
+ protoFieldName, ok := index.(String)
+ if !ok {
+ return MaybeNoSuchOverloadErr(index)
+ }
+ protoFieldStr := string(protoFieldName)
+ fd, found := o.typeDesc.FieldByName(protoFieldStr)
+ if !found {
+ return NewErr("no such field '%s'", index)
+ }
+ fv, err := fd.GetFrom(o.value)
+ if err != nil {
+ return NewErr(err.Error())
+ }
+ return o.NativeToValue(fv)
+}
+
+func (o *protoObj) Type() ref.Type {
+ return o.typeValue.(ref.Type)
+}
+
+func (o *protoObj) Value() any {
+ return o.value
+}
diff --git a/vendor/github.com/google/cel-go/common/types/optional.go b/vendor/github.com/google/cel-go/common/types/optional.go
new file mode 100644
index 000000000..97845a740
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/optional.go
@@ -0,0 +1,108 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+
+ "github.com/google/cel-go/common/types/ref"
+)
+
+var (
+ // OptionalType indicates the runtime type of an optional value.
+ OptionalType = NewOpaqueType("optional_type")
+
+ // OptionalNone is a sentinel value which is used to indicate an empty optional value.
+ OptionalNone = &Optional{}
+)
+
+// OptionalOf returns an optional value which wraps a concrete CEL value.
+func OptionalOf(value ref.Val) *Optional {
+ return &Optional{value: value}
+}
+
+// Optional value which points to a value if non-empty.
+type Optional struct {
+ value ref.Val
+}
+
+// HasValue returns true if the optional has a value.
+func (o *Optional) HasValue() bool {
+ return o.value != nil
+}
+
+// GetValue returns the wrapped value contained in the optional.
+func (o *Optional) GetValue() ref.Val {
+ if !o.HasValue() {
+ return NewErr("optional.none() dereference")
+ }
+ return o.value
+}
+
+// ConvertToNative implements the ref.Val interface method.
+func (o *Optional) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ if !o.HasValue() {
+ return nil, errors.New("optional.none() dereference")
+ }
+ return o.value.ConvertToNative(typeDesc)
+}
+
+// ConvertToType implements the ref.Val interface method.
+func (o *Optional) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case OptionalType:
+ return o
+ case TypeType:
+ return OptionalType
+ }
+ return NewErr("type conversion error from '%s' to '%s'", OptionalType, typeVal)
+}
+
+// Equal determines whether the values contained by two optional values are equal.
+func (o *Optional) Equal(other ref.Val) ref.Val {
+ otherOpt, isOpt := other.(*Optional)
+ if !isOpt {
+ return False
+ }
+ if !o.HasValue() {
+ return Bool(!otherOpt.HasValue())
+ }
+ if !otherOpt.HasValue() {
+ return False
+ }
+ return o.value.Equal(otherOpt.value)
+}
+
+func (o *Optional) String() string {
+ if o.HasValue() {
+ return fmt.Sprintf("optional(%v)", o.GetValue())
+ }
+ return "optional.none()"
+}
+
+// Type implements the ref.Val interface method.
+func (o *Optional) Type() ref.Type {
+ return OptionalType
+}
+
+// Value returns the underlying 'Value()' of the wrapped value, if present.
+func (o *Optional) Value() any {
+ if o.value == nil {
+ return nil
+ }
+ return o.value.Value()
+}
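+
+// Illustrative usage sketch, not part of the upstream file: callers are
+// expected to check HasValue before dereferencing, since GetValue on the
+// empty optional yields an error value:
+//
+//	opt := OptionalOf(String("x"))
+//	opt.HasValue()          // true
+//	opt.GetValue()          // String("x")
+//	OptionalNone.GetValue() // error value: optional.none() dereference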
diff --git a/vendor/github.com/google/cel-go/common/types/overflow.go b/vendor/github.com/google/cel-go/common/types/overflow.go
new file mode 100644
index 000000000..dcb66ef59
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/overflow.go
@@ -0,0 +1,429 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "math"
+ "time"
+)
+
+var (
+ doubleTwoTo64 = math.Ldexp(1.0, 64)
+)
+
+// addInt64Checked performs addition with overflow detection of two int64 values.
+//
+// If the operation fails the error return value will be non-nil.
+func addInt64Checked(x, y int64) (int64, error) {
+ if (y > 0 && x > math.MaxInt64-y) || (y < 0 && x < math.MinInt64-y) {
+ return 0, errIntOverflow
+ }
+ return x + y, nil
+}
+
+// subtractInt64Checked performs subtraction with overflow detection of two int64 values.
+//
+// If the operation fails the error return value will be non-nil.
+func subtractInt64Checked(x, y int64) (int64, error) {
+ if (y < 0 && x > math.MaxInt64+y) || (y > 0 && x < math.MinInt64+y) {
+ return 0, errIntOverflow
+ }
+ return x - y, nil
+}
+
+// negateInt64Checked performs negation with overflow detection of an int64.
+//
+// If the operation fails the error return value will be non-nil.
+func negateInt64Checked(x int64) (int64, error) {
+	// In two's complement, negating MinInt64 would result in a value of MaxInt64+1.
+ if x == math.MinInt64 {
+ return 0, errIntOverflow
+ }
+ return -x, nil
+}
+
+// multiplyInt64Checked performs multiplication with overflow detection of two int64 values.
+//
+// If the operation fails the error return value will be non-nil.
+func multiplyInt64Checked(x, y int64) (int64, error) {
+ // Detecting multiplication overflow is more complicated than the others. The first two detect
+ // attempting to negate MinInt64, which would result in MaxInt64+1. The other four detect normal
+ // overflow conditions.
+ if (x == -1 && y == math.MinInt64) || (y == -1 && x == math.MinInt64) ||
+ // x is positive, y is positive
+ (x > 0 && y > 0 && x > math.MaxInt64/y) ||
+ // x is positive, y is negative
+ (x > 0 && y < 0 && y < math.MinInt64/x) ||
+ // x is negative, y is positive
+ (x < 0 && y > 0 && x < math.MinInt64/y) ||
+ // x is negative, y is negative
+ (x < 0 && y < 0 && y < math.MaxInt64/x) {
+ return 0, errIntOverflow
+ }
+ return x * y, nil
+}
+
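+// Illustrative sketch, not part of the upstream file: the -1/MinInt64 guards
+// cover the one product the division-based bounds checks cannot express,
+// since -MinInt64 is unrepresentable in int64:
+//
+//	multiplyInt64Checked(math.MinInt64, -1) // (0, errIntOverflow)
+//	multiplyInt64Checked(math.MaxInt64, 2)  // (0, errIntOverflow): MaxInt64 > MaxInt64/2
+//	multiplyInt64Checked(1<<31, 1<<31)      // (1<<62, nil): still in range
+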
+// divideInt64Checked performs division with overflow detection of two int64 values,
+// as well as a division by zero check.
+//
+// If the operation fails the error return value will be non-nil.
+func divideInt64Checked(x, y int64) (int64, error) {
+ // Division by zero.
+ if y == 0 {
+ return 0, errDivideByZero
+ }
+	// In two's complement, negating MinInt64 would result in a value of MaxInt64+1.
+ if x == math.MinInt64 && y == -1 {
+ return 0, errIntOverflow
+ }
+ return x / y, nil
+}
+
+// moduloInt64Checked performs modulo with overflow detection of two int64 values
+// as well as a modulus by zero check.
+//
+// If the operation fails the error return value will be non-nil.
+func moduloInt64Checked(x, y int64) (int64, error) {
+ // Modulus by zero.
+ if y == 0 {
+ return 0, errModulusByZero
+ }
+	// In two's complement, negating MinInt64 would result in a value of MaxInt64+1.
+ if x == math.MinInt64 && y == -1 {
+ return 0, errIntOverflow
+ }
+ return x % y, nil
+}
+
+// addUint64Checked performs addition with overflow detection of two uint64 values.
+//
+// If the operation fails due to overflow the error return value will be non-nil.
+func addUint64Checked(x, y uint64) (uint64, error) {
+ if y > 0 && x > math.MaxUint64-y {
+ return 0, errUintOverflow
+ }
+ return x + y, nil
+}
+
+// subtractUint64Checked performs subtraction with overflow detection of two uint64 values.
+//
+// If the operation fails due to overflow the error return value will be non-nil.
+func subtractUint64Checked(x, y uint64) (uint64, error) {
+ if y > x {
+ return 0, errUintOverflow
+ }
+ return x - y, nil
+}
+
+// multiplyUint64Checked performs multiplication with overflow detection of two uint64 values.
+//
+// If the operation fails due to overflow the error return value will be non-nil.
+func multiplyUint64Checked(x, y uint64) (uint64, error) {
+ if y != 0 && x > math.MaxUint64/y {
+ return 0, errUintOverflow
+ }
+ return x * y, nil
+}
+
+// divideUint64Checked performs division with a test for division by zero.
+//
+// If the operation fails the error return value will be non-nil.
+func divideUint64Checked(x, y uint64) (uint64, error) {
+ if y == 0 {
+ return 0, errDivideByZero
+ }
+ return x / y, nil
+}
+
+// moduloUint64Checked performs modulo with a test for modulus by zero.
+//
+// If the operation fails the error return value will be non-nil.
+func moduloUint64Checked(x, y uint64) (uint64, error) {
+ if y == 0 {
+ return 0, errModulusByZero
+ }
+ return x % y, nil
+}
+
+// addDurationChecked performs addition with overflow detection of two time.Durations.
+//
+// If the operation fails due to overflow the error return value will be non-nil.
+func addDurationChecked(x, y time.Duration) (time.Duration, error) {
+ val, err := addInt64Checked(int64(x), int64(y))
+ if err != nil {
+ return time.Duration(0), err
+ }
+ return time.Duration(val), nil
+}
+
+// subtractDurationChecked performs subtraction with overflow detection of two time.Durations.
+//
+// If the operation fails due to overflow the error return value will be non-nil.
+func subtractDurationChecked(x, y time.Duration) (time.Duration, error) {
+ val, err := subtractInt64Checked(int64(x), int64(y))
+ if err != nil {
+ return time.Duration(0), err
+ }
+ return time.Duration(val), nil
+}
+
+// negateDurationChecked performs negation with overflow detection of a time.Duration.
+//
+// If the operation fails due to overflow the error return value will be non-nil.
+func negateDurationChecked(x time.Duration) (time.Duration, error) {
+ val, err := negateInt64Checked(int64(x))
+ if err != nil {
+ return time.Duration(0), err
+ }
+ return time.Duration(val), nil
+}
+
+// addTimeDurationChecked performs addition with overflow detection of a time.Time and time.Duration.
+//
+// If the operation fails due to overflow the error return value will be non-nil.
+func addTimeDurationChecked(x time.Time, y time.Duration) (time.Time, error) {
+	// This is tricky. A time is represented as (int64, int32) where the first is seconds and the
+	// second is nanoseconds. A duration is an int64 representing nanoseconds. We cannot normalize
+	// time to int64 as it could potentially overflow. The only way to proceed is to break time and
+	// duration into second and nanosecond components.
+
+ // First we break time into its components by truncating and subtracting.
+ sec1 := x.Truncate(time.Second).Unix() // Truncate to seconds.
+ nsec1 := x.Sub(x.Truncate(time.Second)).Nanoseconds() // Get nanoseconds by truncating and subtracting.
+
+ // Second we break duration into its components by dividing and modulo.
+ sec2 := int64(y) / int64(time.Second) // Truncate to seconds.
+ nsec2 := int64(y) % int64(time.Second) // Get remainder.
+
+ // Add seconds first, detecting any overflow.
+ sec, err := addInt64Checked(sec1, sec2)
+ if err != nil {
+ return time.Time{}, err
+ }
+ // Nanoseconds cannot overflow as time.Time normalizes them to [0, 999999999].
+ nsec := nsec1 + nsec2
+
+ // We need to normalize nanoseconds to be positive and carry extra nanoseconds to seconds.
+ // Adapted from time.Unix(int64, int64).
+ if nsec < 0 || nsec >= int64(time.Second) {
+ // Add seconds.
+ sec, err = addInt64Checked(sec, nsec/int64(time.Second))
+ if err != nil {
+ return time.Time{}, err
+ }
+
+ nsec -= (nsec / int64(time.Second)) * int64(time.Second)
+ if nsec < 0 {
+ // Subtract an extra second
+ sec, err = addInt64Checked(sec, -1)
+ if err != nil {
+ return time.Time{}, err
+ }
+ nsec += int64(time.Second)
+ }
+ }
+
+	// Check if the number of seconds from Unix epoch is within our acceptable range.
+ if sec < minUnixTime || sec > maxUnixTime {
+ return time.Time{}, errTimestampOverflow
+ }
+
+ // Return resulting time and propagate time zone.
+ return time.Unix(sec, nsec).In(x.Location()), nil
+}
+
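+// A worked example of the decomposition above (illustrative, not upstream):
+// adding 1.5s to a time at 10.999s yields sec == 11 and nsec == 1_499_000_000,
+// which normalization folds into sec == 12, nsec == 499_000_000:
+//
+//	t := time.Unix(10, 999_000_000)        // assumed input
+//	d := 1500 * time.Millisecond           // sec2 == 1, nsec2 == 500_000_000
+//	out, _ := addTimeDurationChecked(t, d) // out.Unix() == 12
+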
+// subtractTimeChecked performs subtraction with overflow detection of two time.Time.
+//
+// If the operation fails due to overflow the error return value will be non-nil.
+func subtractTimeChecked(x, y time.Time) (time.Duration, error) {
+	// Similar to addTimeDurationChecked() above.
+
+ // First we break time into its components by truncating and subtracting.
+ sec1 := x.Truncate(time.Second).Unix() // Truncate to seconds.
+ nsec1 := x.Sub(x.Truncate(time.Second)).Nanoseconds() // Get nanoseconds by truncating and subtracting.
+
+	// Second we break the other time into its components by truncating and subtracting.
+ sec2 := y.Truncate(time.Second).Unix() // Truncate to seconds.
+ nsec2 := y.Sub(y.Truncate(time.Second)).Nanoseconds() // Get nanoseconds by truncating and subtracting.
+
+ // Subtract seconds first, detecting any overflow.
+ sec, err := subtractInt64Checked(sec1, sec2)
+ if err != nil {
+ return time.Duration(0), err
+ }
+
+ // Nanoseconds cannot overflow as time.Time normalizes them to [0, 999999999].
+ nsec := nsec1 - nsec2
+
+ // Scale seconds to nanoseconds detecting overflow.
+ tsec, err := multiplyInt64Checked(sec, int64(time.Second))
+ if err != nil {
+ return time.Duration(0), err
+ }
+
+	// Lastly we need to add the scaled seconds and the remaining nanoseconds together.
+ val, err := addInt64Checked(tsec, nsec)
+ if err != nil {
+ return time.Duration(0), err
+ }
+
+ return time.Duration(val), nil
+}
+
+// subtractTimeDurationChecked performs subtraction with overflow detection of a time.Time and
+// time.Duration.
+//
+// If the operation fails due to overflow the error return value will be non-nil.
+func subtractTimeDurationChecked(x time.Time, y time.Duration) (time.Time, error) {
+ // The easiest way to implement this is to negate y and add them.
+ // x - y = x + -y
+ val, err := negateDurationChecked(y)
+ if err != nil {
+ return time.Time{}, err
+ }
+ return addTimeDurationChecked(x, val)
+}
+
+// doubleToInt64Checked converts a double to an int64 value.
+//
+// If the conversion fails due to overflow the error return value will be non-nil.
+func doubleToInt64Checked(v float64) (int64, error) {
+ if math.IsInf(v, 0) || math.IsNaN(v) || v <= float64(math.MinInt64) || v >= float64(math.MaxInt64) {
+ return 0, errIntOverflow
+ }
+ return int64(v), nil
+}
+
+// doubleToUint64Checked converts a double to a uint64 value.
+//
+// If the conversion fails due to overflow the error return value will be non-nil.
+func doubleToUint64Checked(v float64) (uint64, error) {
+ if math.IsInf(v, 0) || math.IsNaN(v) || v < 0 || v >= doubleTwoTo64 {
+ return 0, errUintOverflow
+ }
+ return uint64(v), nil
+}
+
+// int64ToUint64Checked converts an int64 to a uint64 value.
+//
+// If the conversion fails due to overflow the error return value will be non-nil.
+func int64ToUint64Checked(v int64) (uint64, error) {
+ if v < 0 {
+ return 0, errUintOverflow
+ }
+ return uint64(v), nil
+}
+
+// int64ToInt8Checked converts an int64 to an int8 value.
+//
+// If the conversion fails due to overflow the error return value will be non-nil.
+func int64ToInt8Checked(v int64) (int8, error) {
+ if v < math.MinInt8 || v > math.MaxInt8 {
+ return 0, errIntOverflow
+ }
+ return int8(v), nil
+}
+
+// int64ToInt16Checked converts an int64 to an int16 value.
+//
+// If the conversion fails due to overflow the error return value will be non-nil.
+func int64ToInt16Checked(v int64) (int16, error) {
+ if v < math.MinInt16 || v > math.MaxInt16 {
+ return 0, errIntOverflow
+ }
+ return int16(v), nil
+}
+
+// int64ToInt32Checked converts an int64 to an int32 value.
+//
+// If the conversion fails due to overflow the error return value will be non-nil.
+func int64ToInt32Checked(v int64) (int32, error) {
+ if v < math.MinInt32 || v > math.MaxInt32 {
+ return 0, errIntOverflow
+ }
+ return int32(v), nil
+}
+
+// uint64ToUint8Checked converts a uint64 to a uint8 value.
+//
+// If the conversion fails due to overflow the error return value will be non-nil.
+func uint64ToUint8Checked(v uint64) (uint8, error) {
+ if v > math.MaxUint8 {
+ return 0, errUintOverflow
+ }
+ return uint8(v), nil
+}
+
+// uint64ToUint16Checked converts a uint64 to a uint16 value.
+//
+// If the conversion fails due to overflow the error return value will be non-nil.
+func uint64ToUint16Checked(v uint64) (uint16, error) {
+ if v > math.MaxUint16 {
+ return 0, errUintOverflow
+ }
+ return uint16(v), nil
+}
+
+// uint64ToUint32Checked converts a uint64 to a uint32 value.
+//
+// If the conversion fails due to overflow the error return value will be non-nil.
+func uint64ToUint32Checked(v uint64) (uint32, error) {
+ if v > math.MaxUint32 {
+ return 0, errUintOverflow
+ }
+ return uint32(v), nil
+}
+
+// uint64ToInt64Checked converts a uint64 to an int64 value.
+//
+// If the conversion fails due to overflow the error return value will be non-nil.
+func uint64ToInt64Checked(v uint64) (int64, error) {
+ if v > math.MaxInt64 {
+ return 0, errIntOverflow
+ }
+ return int64(v), nil
+}
+
+func doubleToUint64Lossless(v float64) (uint64, bool) {
+ u, err := doubleToUint64Checked(v)
+ if err != nil {
+ return 0, false
+ }
+ if float64(u) != v {
+ return 0, false
+ }
+ return u, true
+}
+
+func doubleToInt64Lossless(v float64) (int64, bool) {
+ i, err := doubleToInt64Checked(v)
+ if err != nil {
+ return 0, false
+ }
+ if float64(i) != v {
+ return 0, false
+ }
+ return i, true
+}
+
+func int64ToUint64Lossless(v int64) (uint64, bool) {
+ u, err := int64ToUint64Checked(v)
+ return u, err == nil
+}
+
+func uint64ToInt64Lossless(v uint64) (int64, bool) {
+ i, err := uint64ToInt64Checked(v)
+ return i, err == nil
+}
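+
+// Illustrative sketch, not part of the upstream file: "lossless" here means
+// the round trip reproduces the input exactly, rejecting both fractional
+// values and values outside the target range:
+//
+//	doubleToInt64Lossless(2.0) // (2, true)
+//	doubleToInt64Lossless(2.5) // (0, false): fractional part would be lost
+//	int64ToUint64Lossless(-1)  // (0, false): no uint64 representation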
diff --git a/vendor/github.com/google/cel-go/common/types/pb/BUILD.bazel b/vendor/github.com/google/cel-go/common/types/pb/BUILD.bazel
new file mode 100644
index 000000000..e2b9d37b5
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/pb/BUILD.bazel
@@ -0,0 +1,53 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(
+ default_visibility = ["//visibility:public"],
+ licenses = ["notice"], # Apache 2.0
+)
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "checked.go",
+ "enum.go",
+ "equal.go",
+ "file.go",
+ "pb.go",
+ "type.go",
+ ],
+ importpath = "github.com/google/cel-go/common/types/pb",
+ deps = [
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
+ "@org_golang_google_protobuf//encoding/protowire:go_default_library",
+ "@org_golang_google_protobuf//proto:go_default_library",
+ "@org_golang_google_protobuf//reflect/protoreflect:go_default_library",
+ "@org_golang_google_protobuf//reflect/protoregistry:go_default_library",
+ "@org_golang_google_protobuf//types/dynamicpb:go_default_library",
+ "@org_golang_google_protobuf//types/known/anypb:go_default_library",
+ "@org_golang_google_protobuf//types/known/durationpb:go_default_library",
+ "@org_golang_google_protobuf//types/known/emptypb:go_default_library",
+ "@org_golang_google_protobuf//types/known/structpb:go_default_library",
+ "@org_golang_google_protobuf//types/known/timestamppb:go_default_library",
+ "@org_golang_google_protobuf//types/known/wrapperspb:go_default_library",
+ ],
+)
+
+go_test(
+ name = "go_default_test",
+ size = "small",
+ srcs = [
+ "equal_test.go",
+ "file_test.go",
+ "pb_test.go",
+ "type_test.go",
+ ],
+ embed = [":go_default_library"],
+ deps = [
+ "//checker/decls:go_default_library",
+ "//test/proto2pb:test_all_types_go_proto",
+ "//test/proto3pb:test_all_types_go_proto",
+ "@org_golang_google_protobuf//reflect/protodesc:go_default_library",
+ "@org_golang_google_protobuf//reflect/protoreflect:go_default_library",
+ "@org_golang_google_protobuf//types/descriptorpb:go_default_library",
+ ],
+)
diff --git a/vendor/github.com/google/cel-go/common/types/pb/checked.go b/vendor/github.com/google/cel-go/common/types/pb/checked.go
new file mode 100644
index 000000000..312a6a072
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/pb/checked.go
@@ -0,0 +1,93 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pb
+
+import (
+ "google.golang.org/protobuf/reflect/protoreflect"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+)
+
+var (
+ // CheckedPrimitives map from proto field descriptor type to expr.Type.
+ CheckedPrimitives = map[protoreflect.Kind]*exprpb.Type{
+ protoreflect.BoolKind: checkedBool,
+ protoreflect.BytesKind: checkedBytes,
+ protoreflect.DoubleKind: checkedDouble,
+ protoreflect.FloatKind: checkedDouble,
+ protoreflect.Int32Kind: checkedInt,
+ protoreflect.Int64Kind: checkedInt,
+ protoreflect.Sint32Kind: checkedInt,
+ protoreflect.Sint64Kind: checkedInt,
+ protoreflect.Uint32Kind: checkedUint,
+ protoreflect.Uint64Kind: checkedUint,
+ protoreflect.Fixed32Kind: checkedUint,
+ protoreflect.Fixed64Kind: checkedUint,
+ protoreflect.Sfixed32Kind: checkedInt,
+ protoreflect.Sfixed64Kind: checkedInt,
+ protoreflect.StringKind: checkedString}
+
+ // CheckedWellKnowns map from qualified proto type name to expr.Type for
+ // well-known proto types.
+ CheckedWellKnowns = map[string]*exprpb.Type{
+ // Wrapper types.
+ "google.protobuf.BoolValue": checkedWrap(checkedBool),
+ "google.protobuf.BytesValue": checkedWrap(checkedBytes),
+ "google.protobuf.DoubleValue": checkedWrap(checkedDouble),
+ "google.protobuf.FloatValue": checkedWrap(checkedDouble),
+ "google.protobuf.Int64Value": checkedWrap(checkedInt),
+ "google.protobuf.Int32Value": checkedWrap(checkedInt),
+ "google.protobuf.UInt64Value": checkedWrap(checkedUint),
+ "google.protobuf.UInt32Value": checkedWrap(checkedUint),
+ "google.protobuf.StringValue": checkedWrap(checkedString),
+ // Well-known types.
+ "google.protobuf.Any": checkedAny,
+ "google.protobuf.Duration": checkedDuration,
+ "google.protobuf.Timestamp": checkedTimestamp,
+ // Json types.
+ "google.protobuf.ListValue": checkedListDyn,
+ "google.protobuf.NullValue": checkedNull,
+ "google.protobuf.Struct": checkedMapStringDyn,
+ "google.protobuf.Value": checkedDyn,
+ }
+
+ // common types
+ checkedDyn = &exprpb.Type{TypeKind: &exprpb.Type_Dyn{Dyn: &emptypb.Empty{}}}
+ // Wrapper and primitive types.
+ checkedBool = checkedPrimitive(exprpb.Type_BOOL)
+ checkedBytes = checkedPrimitive(exprpb.Type_BYTES)
+ checkedDouble = checkedPrimitive(exprpb.Type_DOUBLE)
+ checkedInt = checkedPrimitive(exprpb.Type_INT64)
+ checkedString = checkedPrimitive(exprpb.Type_STRING)
+ checkedUint = checkedPrimitive(exprpb.Type_UINT64)
+ // Well-known type equivalents.
+ checkedAny = checkedWellKnown(exprpb.Type_ANY)
+ checkedDuration = checkedWellKnown(exprpb.Type_DURATION)
+ checkedTimestamp = checkedWellKnown(exprpb.Type_TIMESTAMP)
+ // Json-based type equivalents.
+ checkedNull = &exprpb.Type{
+ TypeKind: &exprpb.Type_Null{
+ Null: structpb.NullValue_NULL_VALUE}}
+ checkedListDyn = &exprpb.Type{
+ TypeKind: &exprpb.Type_ListType_{
+ ListType: &exprpb.Type_ListType{ElemType: checkedDyn}}}
+ checkedMapStringDyn = &exprpb.Type{
+ TypeKind: &exprpb.Type_MapType_{
+ MapType: &exprpb.Type_MapType{
+ KeyType: checkedString,
+ ValueType: checkedDyn}}}
+)
diff --git a/vendor/github.com/google/cel-go/common/types/pb/enum.go b/vendor/github.com/google/cel-go/common/types/pb/enum.go
new file mode 100644
index 000000000..09a154630
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/pb/enum.go
@@ -0,0 +1,44 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pb
+
+import (
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// newEnumValueDescription produces an enum value description with the fully qualified enum value
+// name and the enum value descriptor.
+func newEnumValueDescription(name string, desc protoreflect.EnumValueDescriptor) *EnumValueDescription {
+ return &EnumValueDescription{
+ enumValueName: name,
+ desc: desc,
+ }
+}
+
+// EnumValueDescription maps a fully-qualified enum value name to its numeric value.
+type EnumValueDescription struct {
+ enumValueName string
+ desc protoreflect.EnumValueDescriptor
+}
+
+// Name returns the fully-qualified identifier name for the enum value.
+func (ed *EnumValueDescription) Name() string {
+ return ed.enumValueName
+}
+
+// Value returns the (numeric) value of the enum.
+func (ed *EnumValueDescription) Value() int32 {
+ return int32(ed.desc.Number())
+}
diff --git a/vendor/github.com/google/cel-go/common/types/pb/equal.go b/vendor/github.com/google/cel-go/common/types/pb/equal.go
new file mode 100644
index 000000000..76893d85e
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/pb/equal.go
@@ -0,0 +1,206 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pb
+
+import (
+ "bytes"
+ "reflect"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+
+ anypb "google.golang.org/protobuf/types/known/anypb"
+)
+
+// Equal returns whether two proto.Message instances are equal using the following criteria:
+//
+// - Messages must share the same instance of the type descriptor
+// - Known set fields are compared using semantic equality
+// - Bytes are compared using bytes.Equal
+// - Scalar values are compared with operator ==
+// - List and map types are equal if they have the same length and all elements are equal
+// - Messages are equal if they share the same descriptor and all set fields are equal
+// - Unknown fields are compared using byte equality
+// - NaN values are not equal to each other
+// - google.protobuf.Any values are unpacked before comparison
+// - If the type descriptor for a protobuf.Any cannot be found, byte equality is used rather than
+// semantic equality.
+//
+// This method of proto equality mirrors the behavior of the C++ protobuf MessageDifferencer,
+// whereas the golang proto.Equal implementation mirrors the Java protobuf equals() method's
+// behavior, which needed to treat NaN values as equal due to Java semantics.
+func Equal(x, y proto.Message) bool {
+ if x == nil || y == nil {
+ return x == nil && y == nil
+ }
+ xRef := x.ProtoReflect()
+ yRef := y.ProtoReflect()
+ return equalMessage(xRef, yRef)
+}
+
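+// Illustrative sketch, not part of the upstream file: the NaN divergence from
+// proto.Equal described above can be observed with a structpb.Value holding a
+// NaN number (math and structpb are assumed imports for the snippet):
+//
+//	x := structpb.NewNumberValue(math.NaN())
+//	y := structpb.NewNumberValue(math.NaN())
+//	proto.Equal(x, y) // true: Java-style semantics treat NaN fields as equal
+//	Equal(x, y)       // false: NaN != NaN, matching C++ MessageDifferencer
+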
+func equalMessage(mx, my protoreflect.Message) bool {
+ // Note, the original proto.Equal upon which this implementation is based does not specifically handle the
+ // case when both messages are invalid. It is assumed that the descriptors will be equal and that byte-wise
+ // comparison will be used, though the semantics of validity are neither clear, nor promised within the
+ // proto.Equal implementation.
+ if mx.IsValid() != my.IsValid() || mx.Descriptor() != my.Descriptor() {
+ return false
+ }
+
+ // This is an innovation on the default proto.Equal: protobuf.Any values are unpacked before
+ // comparison, as otherwise the Any values are compared by bytes rather than structurally.
+ if isAny(mx) && isAny(my) {
+ ax := mx.Interface().(*anypb.Any)
+ ay := my.Interface().(*anypb.Any)
+ // If the values are not the same type url, return false.
+ if ax.GetTypeUrl() != ay.GetTypeUrl() {
+ return false
+ }
+ // If the values are byte equal, then return true.
+ if bytes.Equal(ax.GetValue(), ay.GetValue()) {
+ return true
+ }
+ // Otherwise fall through to the semantic comparison of the any values.
+ x, err := ax.UnmarshalNew()
+ if err != nil {
+ return false
+ }
+ y, err := ay.UnmarshalNew()
+ if err != nil {
+ return false
+ }
+ // Recursively compare the unwrapped messages to ensure nested Any values are unwrapped accordingly.
+ return equalMessage(x.ProtoReflect(), y.ProtoReflect())
+ }
+
+ // Walk the set fields to determine field-wise equality
+ nx := 0
+ equal := true
+ mx.Range(func(fd protoreflect.FieldDescriptor, vx protoreflect.Value) bool {
+ nx++
+ equal = my.Has(fd) && equalField(fd, vx, my.Get(fd))
+ return equal
+ })
+ if !equal {
+ return false
+ }
+ // Establish the count of set fields on message y
+ ny := 0
+ my.Range(func(protoreflect.FieldDescriptor, protoreflect.Value) bool {
+ ny++
+ return true
+ })
+ // If the number of set fields is not equal return false.
+ if nx != ny {
+ return false
+ }
+
+ return equalUnknown(mx.GetUnknown(), my.GetUnknown())
+}
+
+func equalField(fd protoreflect.FieldDescriptor, x, y protoreflect.Value) bool {
+ switch {
+ case fd.IsMap():
+ return equalMap(fd, x.Map(), y.Map())
+ case fd.IsList():
+ return equalList(fd, x.List(), y.List())
+ default:
+ return equalValue(fd, x, y)
+ }
+}
+
+func equalMap(fd protoreflect.FieldDescriptor, x, y protoreflect.Map) bool {
+ if x.Len() != y.Len() {
+ return false
+ }
+ equal := true
+ x.Range(func(k protoreflect.MapKey, vx protoreflect.Value) bool {
+ vy := y.Get(k)
+ equal = y.Has(k) && equalValue(fd.MapValue(), vx, vy)
+ return equal
+ })
+ return equal
+}
+
+func equalList(fd protoreflect.FieldDescriptor, x, y protoreflect.List) bool {
+ if x.Len() != y.Len() {
+ return false
+ }
+ for i := x.Len() - 1; i >= 0; i-- {
+ if !equalValue(fd, x.Get(i), y.Get(i)) {
+ return false
+ }
+ }
+ return true
+}
+
+func equalValue(fd protoreflect.FieldDescriptor, x, y protoreflect.Value) bool {
+ switch fd.Kind() {
+ case protoreflect.BoolKind:
+ return x.Bool() == y.Bool()
+ case protoreflect.EnumKind:
+ return x.Enum() == y.Enum()
+ case protoreflect.Int32Kind, protoreflect.Sint32Kind,
+ protoreflect.Int64Kind, protoreflect.Sint64Kind,
+ protoreflect.Sfixed32Kind, protoreflect.Sfixed64Kind:
+ return x.Int() == y.Int()
+ case protoreflect.Uint32Kind, protoreflect.Uint64Kind,
+ protoreflect.Fixed32Kind, protoreflect.Fixed64Kind:
+ return x.Uint() == y.Uint()
+ case protoreflect.FloatKind, protoreflect.DoubleKind:
+ return x.Float() == y.Float()
+ case protoreflect.StringKind:
+ return x.String() == y.String()
+ case protoreflect.BytesKind:
+ return bytes.Equal(x.Bytes(), y.Bytes())
+ case protoreflect.MessageKind, protoreflect.GroupKind:
+ return equalMessage(x.Message(), y.Message())
+ default:
+ return x.Interface() == y.Interface()
+ }
+}
+
+func equalUnknown(x, y protoreflect.RawFields) bool {
+ lenX := len(x)
+ lenY := len(y)
+ if lenX != lenY {
+ return false
+ }
+ if lenX == 0 {
+ return true
+ }
+ if bytes.Equal([]byte(x), []byte(y)) {
+ return true
+ }
+
+ mx := make(map[protoreflect.FieldNumber]protoreflect.RawFields)
+ my := make(map[protoreflect.FieldNumber]protoreflect.RawFields)
+ for len(x) > 0 {
+ fnum, _, n := protowire.ConsumeField(x)
+ mx[fnum] = append(mx[fnum], x[:n]...)
+ x = x[n:]
+ }
+ for len(y) > 0 {
+ fnum, _, n := protowire.ConsumeField(y)
+ my[fnum] = append(my[fnum], y[:n]...)
+ y = y[n:]
+ }
+ return reflect.DeepEqual(mx, my)
+}
+
+func isAny(m protoreflect.Message) bool {
+ return string(m.Descriptor().FullName()) == "google.protobuf.Any"
+}
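
A minimal illustrative sketch (separate from the vendored file) of how Equal above differs from proto.Equal: NaN values compare unequal under the C++-style semantics, and google.protobuf.Any values are unpacked and compared structurally.

```go
package main

import (
	"fmt"
	"math"

	"github.com/google/cel-go/common/types/pb"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/anypb"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	// NaN != NaN under pb.Equal (C++ MessageDifferencer semantics), while
	// proto.Equal compares float bit patterns and reports the values as equal.
	x := wrapperspb.Double(math.NaN())
	y := wrapperspb.Double(math.NaN())
	fmt.Println(pb.Equal(x, y), proto.Equal(x, y)) // false true

	// Any values with the same type URL and payload short-circuit to byte
	// equality; differing payloads fall through to semantic comparison.
	ax, _ := anypb.New(wrapperspb.String("hello"))
	ay, _ := anypb.New(wrapperspb.String("hello"))
	fmt.Println(pb.Equal(ax, ay)) // true
}
```
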
diff --git a/vendor/github.com/google/cel-go/common/types/pb/file.go b/vendor/github.com/google/cel-go/common/types/pb/file.go
new file mode 100644
index 000000000..e323afb1d
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/pb/file.go
@@ -0,0 +1,202 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pb
+
+import (
+ "fmt"
+
+ "google.golang.org/protobuf/reflect/protoreflect"
+
+ dynamicpb "google.golang.org/protobuf/types/dynamicpb"
+)
+
+// newFileDescription returns a FileDescription instance with a complete listing of all the message
+// types and enum values, as well as a map of extensions declared within any scope in the file.
+func newFileDescription(fileDesc protoreflect.FileDescriptor, pbdb *Db) (*FileDescription, extensionMap) {
+ metadata := collectFileMetadata(fileDesc)
+ enums := make(map[string]*EnumValueDescription)
+ for name, enumVal := range metadata.enumValues {
+ enums[name] = newEnumValueDescription(name, enumVal)
+ }
+ types := make(map[string]*TypeDescription)
+ for name, msgType := range metadata.msgTypes {
+ types[name] = newTypeDescription(name, msgType, pbdb.extensions)
+ }
+ fileExtMap := make(extensionMap)
+ for typeName, extensions := range metadata.msgExtensionMap {
+ messageExtMap, found := fileExtMap[typeName]
+ if !found {
+ messageExtMap = make(map[string]*FieldDescription)
+ }
+ for _, ext := range extensions {
+ extDesc := dynamicpb.NewExtensionType(ext).TypeDescriptor()
+ messageExtMap[string(ext.FullName())] = newFieldDescription(extDesc)
+ }
+ fileExtMap[typeName] = messageExtMap
+ }
+ return &FileDescription{
+ name: fileDesc.Path(),
+ types: types,
+ enums: enums,
+ }, fileExtMap
+}
+
+// FileDescription holds a map of all types and enum values declared within a proto file.
+type FileDescription struct {
+ name string
+ types map[string]*TypeDescription
+ enums map[string]*EnumValueDescription
+}
+
+// Copy creates a copy of the FileDescription with updated Db references within its types.
+func (fd *FileDescription) Copy(pbdb *Db) *FileDescription {
+ typesCopy := make(map[string]*TypeDescription, len(fd.types))
+ for k, v := range fd.types {
+ typesCopy[k] = v.Copy(pbdb)
+ }
+ return &FileDescription{
+ name: fd.name,
+ types: typesCopy,
+ enums: fd.enums,
+ }
+}
+
+// GetName returns the fully qualified file path for the file.
+func (fd *FileDescription) GetName() string {
+ return fd.name
+}
+
+// GetEnumDescription returns an EnumValueDescription for a qualified enum value
+// name declared within the .proto file.
+func (fd *FileDescription) GetEnumDescription(enumName string) (*EnumValueDescription, bool) {
+ ed, found := fd.enums[sanitizeProtoName(enumName)]
+ return ed, found
+}
+
+// GetEnumNames returns the string names of all enum values in the file.
+func (fd *FileDescription) GetEnumNames() []string {
+ enumNames := make([]string, len(fd.enums))
+ i := 0
+ for _, e := range fd.enums {
+ enumNames[i] = e.Name()
+ i++
+ }
+ return enumNames
+}
+
+// GetTypeDescription returns a TypeDescription for a qualified protobuf message type name
+// declared within the .proto file.
+func (fd *FileDescription) GetTypeDescription(typeName string) (*TypeDescription, bool) {
+ td, found := fd.types[sanitizeProtoName(typeName)]
+ return td, found
+}
+
+// GetTypeNames returns the list of all type names contained within the file.
+func (fd *FileDescription) GetTypeNames() []string {
+ typeNames := make([]string, len(fd.types))
+ i := 0
+ for _, t := range fd.types {
+ typeNames[i] = t.Name()
+ i++
+ }
+ return typeNames
+}
+
+// sanitizeProtoName strips the leading '.' from the proto message name.
+func sanitizeProtoName(name string) string {
+ if name != "" && name[0] == '.' {
+ return name[1:]
+ }
+ return name
+}
+
+// fileMetadata is a flattened view of message types and enum values within a file descriptor.
+type fileMetadata struct {
+ // msgTypes maps from fully-qualified message name to descriptor.
+ msgTypes map[string]protoreflect.MessageDescriptor
+ // enumValues maps from fully-qualified enum value to enum value descriptor.
+ enumValues map[string]protoreflect.EnumValueDescriptor
+ // msgExtensionMap maps from the protobuf message name being extended to a set of extensions
+ // for the type.
+ msgExtensionMap map[string][]protoreflect.ExtensionDescriptor
+
+ // TODO: support enum type definitions for use in future type-check enhancements.
+}
+
+// collectFileMetadata traverses the proto file object graph to collect message types and enum
+// values and index them by their fully qualified names.
+func collectFileMetadata(fileDesc protoreflect.FileDescriptor) *fileMetadata {
+ msgTypes := make(map[string]protoreflect.MessageDescriptor)
+ enumValues := make(map[string]protoreflect.EnumValueDescriptor)
+ msgExtensionMap := make(map[string][]protoreflect.ExtensionDescriptor)
+ collectMsgTypes(fileDesc.Messages(), msgTypes, enumValues, msgExtensionMap)
+ collectEnumValues(fileDesc.Enums(), enumValues)
+ collectExtensions(fileDesc.Extensions(), msgExtensionMap)
+ return &fileMetadata{
+ msgTypes: msgTypes,
+ enumValues: enumValues,
+ msgExtensionMap: msgExtensionMap,
+ }
+}
+
+// collectMsgTypes recursively collects messages, nested messages, and nested enums into a map of
+// fully qualified protobuf names to descriptors.
+func collectMsgTypes(msgTypes protoreflect.MessageDescriptors,
+ msgTypeMap map[string]protoreflect.MessageDescriptor,
+ enumValueMap map[string]protoreflect.EnumValueDescriptor,
+ msgExtensionMap map[string][]protoreflect.ExtensionDescriptor) {
+ for i := 0; i < msgTypes.Len(); i++ {
+ msgType := msgTypes.Get(i)
+ msgTypeMap[string(msgType.FullName())] = msgType
+ nestedMsgTypes := msgType.Messages()
+ if nestedMsgTypes.Len() != 0 {
+ collectMsgTypes(nestedMsgTypes, msgTypeMap, enumValueMap, msgExtensionMap)
+ }
+ nestedEnumTypes := msgType.Enums()
+ if nestedEnumTypes.Len() != 0 {
+ collectEnumValues(nestedEnumTypes, enumValueMap)
+ }
+ nestedExtensions := msgType.Extensions()
+ if nestedExtensions.Len() != 0 {
+ collectExtensions(nestedExtensions, msgExtensionMap)
+ }
+ }
+}
+
+// collectEnumValues accumulates the enum values within an enum declaration.
+func collectEnumValues(enumTypes protoreflect.EnumDescriptors, enumValueMap map[string]protoreflect.EnumValueDescriptor) {
+ for i := 0; i < enumTypes.Len(); i++ {
+ enumType := enumTypes.Get(i)
+ enumTypeValues := enumType.Values()
+ for j := 0; j < enumTypeValues.Len(); j++ {
+ enumValue := enumTypeValues.Get(j)
+ enumValueName := fmt.Sprintf("%s.%s", string(enumType.FullName()), string(enumValue.Name()))
+ enumValueMap[enumValueName] = enumValue
+ }
+ }
+}
+
+func collectExtensions(extensions protoreflect.ExtensionDescriptors, msgExtensionMap map[string][]protoreflect.ExtensionDescriptor) {
+ for i := 0; i < extensions.Len(); i++ {
+ ext := extensions.Get(i)
+ extendsMsg := string(ext.ContainingMessage().FullName())
+ msgExts, found := msgExtensionMap[extendsMsg]
+ if !found {
+ msgExts = []protoreflect.ExtensionDescriptor{}
+ }
+ msgExts = append(msgExts, ext)
+ msgExtensionMap[extendsMsg] = msgExts
+ }
+}
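
A small sketch of the naming convention produced by collectEnumValues: enum values are indexed as `<enum full name>.<value name>`. It relies on the DefaultDb and DescribeEnum defined in pb.go below, which register structpb.Value and hence the google.protobuf.NullValue enum.

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/common/types/pb"
)

func main() {
	// NullValue is declared in struct.proto, which DefaultDb registers at init,
	// so its single value resolves by its fully qualified name.
	ed, found := pb.DefaultDb.DescribeEnum("google.protobuf.NullValue.NULL_VALUE")
	fmt.Println(found, ed.Name(), ed.Value()) // true google.protobuf.NullValue.NULL_VALUE 0
}
```
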
diff --git a/vendor/github.com/google/cel-go/common/types/pb/pb.go b/vendor/github.com/google/cel-go/common/types/pb/pb.go
new file mode 100644
index 000000000..eadebcb04
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/pb/pb.go
@@ -0,0 +1,258 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package pb reflects over protocol buffer descriptors to generate objects
+// that simplify type, enum, and field lookup.
+package pb
+
+import (
+ "fmt"
+
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ durpb "google.golang.org/protobuf/types/known/durationpb"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ tspb "google.golang.org/protobuf/types/known/timestamppb"
+ wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
+)
+
+// Db maps from file / message / enum name to file description.
+//
+// Each Db instance is isolated from the others, and while information about protobuf descriptors
+// may be fetched from the global protobuf registry, no descriptors are added to that registry, as
+// doing so would violate the isolation guarantees of the Db object.
+type Db struct {
+ revFileDescriptorMap map[string]*FileDescription
+ // files contains the deduped set of FileDescriptions whose types are contained in the pb.Db.
+ files []*FileDescription
+ // extensions contains the mapping between a given type name, extension name and its FieldDescription
+ extensions map[string]map[string]*FieldDescription
+}
+
+// extensionMap is a type alias for a map[typeName]map[extensionName]*FieldDescription
+type extensionMap = map[string]map[string]*FieldDescription
+
+var (
+ // DefaultDb is the Db used at evaluation time, unless overridden at check time.
+ DefaultDb = &Db{
+ revFileDescriptorMap: make(map[string]*FileDescription),
+ files: []*FileDescription{},
+ extensions: make(extensionMap),
+ }
+)
+
+// Merge will copy the source proto message into the destination, or error if the merge cannot be completed.
+//
+// Unlike proto.Merge, this method falls back to proto.Marshal/Unmarshal if the two proto messages
+// do not share the same instance of their type descriptor.
+func Merge(dstPB, srcPB proto.Message) error {
+ src, dst := srcPB.ProtoReflect(), dstPB.ProtoReflect()
+ if src.Descriptor() == dst.Descriptor() {
+ proto.Merge(dstPB, srcPB)
+ return nil
+ }
+ if src.Descriptor().FullName() != dst.Descriptor().FullName() {
+ return fmt.Errorf("pb.Merge() arguments must be the same type. got: %v, %v",
+ dst.Descriptor().FullName(), src.Descriptor().FullName())
+ }
+ bytes, err := proto.Marshal(srcPB)
+ if err != nil {
+ return fmt.Errorf("pb.Merge(dstPB, srcPB) failed to marshal source proto: %v", err)
+ }
+ err = proto.Unmarshal(bytes, dstPB)
+ if err != nil {
+ return fmt.Errorf("pb.Merge(dstPB, srcPB) failed to unmarshal to dest proto: %v", err)
+ }
+ return nil
+}
+
+// NewDb creates a new `pb.Db` with an empty type name to file description map.
+func NewDb() *Db {
+ pbdb := &Db{
+ revFileDescriptorMap: make(map[string]*FileDescription),
+ files: []*FileDescription{},
+ extensions: make(extensionMap),
+ }
+ // The FileDescription objects in the default db contain lazily initialized TypeDescription
+ // values which may point to the state contained in the DefaultDb irrespective of this shallow
+ // copy; however, the type graph for a field is idempotently computed, and is guaranteed to
+ // only be initialized once thanks to atomic values within the TypeDescription objects, so it
+ // is safe to share these values across instances.
+ for k, v := range DefaultDb.revFileDescriptorMap {
+ pbdb.revFileDescriptorMap[k] = v
+ }
+ pbdb.files = append(pbdb.files, DefaultDb.files...)
+ return pbdb
+}
+
+// Copy creates a copy of the current database with its own internal descriptor mapping.
+func (pbdb *Db) Copy() *Db {
+ copy := NewDb()
+ for _, fd := range pbdb.files {
+ hasFile := false
+ for _, fd2 := range copy.files {
+ if fd2 == fd {
+ hasFile = true
+ }
+ }
+ if !hasFile {
+ fd = fd.Copy(copy)
+ copy.files = append(copy.files, fd)
+ }
+ for _, enumValName := range fd.GetEnumNames() {
+ copy.revFileDescriptorMap[enumValName] = fd
+ }
+ for _, msgTypeName := range fd.GetTypeNames() {
+ copy.revFileDescriptorMap[msgTypeName] = fd
+ }
+ copy.revFileDescriptorMap[fd.GetName()] = fd
+ }
+ for typeName, extFieldMap := range pbdb.extensions {
+ copyExtFieldMap, found := copy.extensions[typeName]
+ if !found {
+ copyExtFieldMap = make(map[string]*FieldDescription, len(extFieldMap))
+ }
+ for extFieldName, fd := range extFieldMap {
+ copyExtFieldMap[extFieldName] = fd
+ }
+ copy.extensions[typeName] = copyExtFieldMap
+ }
+ return copy
+}
+
+// FileDescriptions returns the set of file descriptions associated with this db.
+func (pbdb *Db) FileDescriptions() []*FileDescription {
+ return pbdb.files
+}
+
+// RegisterDescriptor produces a `FileDescription` from a `FileDescriptor` and registers the
+// message and enum types into the `pb.Db`.
+func (pbdb *Db) RegisterDescriptor(fileDesc protoreflect.FileDescriptor) (*FileDescription, error) {
+ fd, found := pbdb.revFileDescriptorMap[fileDesc.Path()]
+ if found {
+ return fd, nil
+ }
+ // Make sure to search the global registry to see if a protoreflect.FileDescriptor for
+ // the file specified has been linked into the binary. If so, use the copy of the descriptor
+ // from the global cache.
+ //
+ // Note: Proto reflection relies on descriptor values being object-equal rather than merely
+ // equivalent. This choice means that a FieldDescriptor generated from a FileDescriptorProto
+ // will be incompatible with the FieldDescriptor in the global registry and any message created
+ // from that global registry.
+ globalFD, err := protoregistry.GlobalFiles.FindFileByPath(fileDesc.Path())
+ if err == nil {
+ fileDesc = globalFD
+ }
+ var fileExtMap extensionMap
+ fd, fileExtMap = newFileDescription(fileDesc, pbdb)
+ for _, enumValName := range fd.GetEnumNames() {
+ pbdb.revFileDescriptorMap[enumValName] = fd
+ }
+ for _, msgTypeName := range fd.GetTypeNames() {
+ pbdb.revFileDescriptorMap[msgTypeName] = fd
+ }
+ pbdb.revFileDescriptorMap[fd.GetName()] = fd
+
+ // Return the specific file descriptor registered.
+ pbdb.files = append(pbdb.files, fd)
+
+ // Index the protobuf message extensions from the file into the pbdb
+ for typeName, extMap := range fileExtMap {
+ typeExtMap, found := pbdb.extensions[typeName]
+ if !found {
+ pbdb.extensions[typeName] = extMap
+ continue
+ }
+ for extName, field := range extMap {
+ typeExtMap[extName] = field
+ }
+ }
+ return fd, nil
+}
+
+// RegisterMessage produces a `FileDescription` from a `message` and registers the message and all
+// other definitions within the message file into the `pb.Db`.
+func (pbdb *Db) RegisterMessage(message proto.Message) (*FileDescription, error) {
+ msgDesc := message.ProtoReflect().Descriptor()
+ msgName := msgDesc.FullName()
+ typeName := sanitizeProtoName(string(msgName))
+ if fd, found := pbdb.revFileDescriptorMap[typeName]; found {
+ return fd, nil
+ }
+ return pbdb.RegisterDescriptor(msgDesc.ParentFile())
+}
+
+// DescribeEnum takes a qualified enum name and returns an `EnumValueDescription` if it exists
+// in the `pb.Db`.
+func (pbdb *Db) DescribeEnum(enumName string) (*EnumValueDescription, bool) {
+ enumName = sanitizeProtoName(enumName)
+ if fd, found := pbdb.revFileDescriptorMap[enumName]; found {
+ return fd.GetEnumDescription(enumName)
+ }
+ return nil, false
+}
+
+// DescribeType returns a `TypeDescription` for the `typeName` if it exists in the `pb.Db`.
+func (pbdb *Db) DescribeType(typeName string) (*TypeDescription, bool) {
+ typeName = sanitizeProtoName(typeName)
+ if fd, found := pbdb.revFileDescriptorMap[typeName]; found {
+ return fd.GetTypeDescription(typeName)
+ }
+ return nil, false
+}
+
+// CollectFileDescriptorSet builds a file descriptor set associated with the file where the input
+// message is declared.
+func CollectFileDescriptorSet(message proto.Message) map[string]protoreflect.FileDescriptor {
+ fdMap := map[string]protoreflect.FileDescriptor{}
+ parentFile := message.ProtoReflect().Descriptor().ParentFile()
+ fdMap[parentFile.Path()] = parentFile
+ // Initialize list of dependencies
+ deps := make([]protoreflect.FileImport, parentFile.Imports().Len())
+ for i := 0; i < parentFile.Imports().Len(); i++ {
+ deps[i] = parentFile.Imports().Get(i)
+ }
+ // Expand list for new dependencies
+ for i := 0; i < len(deps); i++ {
+ dep := deps[i]
+ if _, found := fdMap[dep.Path()]; found {
+ continue
+ }
+ fdMap[dep.Path()] = dep.FileDescriptor
+ for j := 0; j < dep.FileDescriptor.Imports().Len(); j++ {
+ deps = append(deps, dep.FileDescriptor.Imports().Get(j))
+ }
+ }
+ return fdMap
+}
+
+func init() {
+ // Describe well-known types to ensure they can always be resolved by the check and interpret
+ // execution phases.
+ //
+ // The following subset of message types is enough to ensure that all well-known types can
+ // be resolved at runtime, since describing the value results in describing the whole file
+ // where the message is declared.
+ DefaultDb.RegisterMessage(&anypb.Any{})
+ DefaultDb.RegisterMessage(&durpb.Duration{})
+ DefaultDb.RegisterMessage(&emptypb.Empty{})
+ DefaultDb.RegisterMessage(&tspb.Timestamp{})
+ DefaultDb.RegisterMessage(&structpb.Value{})
+ DefaultDb.RegisterMessage(&wrapperspb.BoolValue{})
+}
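
A brief usage sketch for the Db API above: NewDb seeds the new instance with the well-known types held by DefaultDb, and sanitizeProtoName tolerates a leading '.' on lookups.

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/common/types/pb"
	tspb "google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	pbdb := pb.NewDb()
	// Registering a message indexes every type declared in its .proto file.
	if _, err := pbdb.RegisterMessage(&tspb.Timestamp{}); err != nil {
		panic(err)
	}
	// The leading '.' is stripped by sanitizeProtoName before lookup.
	td, found := pbdb.DescribeType(".google.protobuf.Timestamp")
	fmt.Println(found, td.Name()) // true google.protobuf.Timestamp
}
```
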
diff --git a/vendor/github.com/google/cel-go/common/types/pb/type.go b/vendor/github.com/google/cel-go/common/types/pb/type.go
new file mode 100644
index 000000000..bdd474c95
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/pb/type.go
@@ -0,0 +1,614 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pb
+
+import (
+ "fmt"
+ "reflect"
+
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+ dynamicpb "google.golang.org/protobuf/types/dynamicpb"
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ dpb "google.golang.org/protobuf/types/known/durationpb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ tpb "google.golang.org/protobuf/types/known/timestamppb"
+ wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
+)
+
+// description is a private interface used to make it convenient to perform type unwrapping at
+// the TypeDescription or FieldDescription level.
+type description interface {
+ // Zero returns an empty immutable protobuf message when the description is a protobuf message
+ // type.
+ Zero() proto.Message
+}
+
+// newTypeDescription produces a TypeDescription value for the fully-qualified proto type name
+// with a given descriptor.
+func newTypeDescription(typeName string, desc protoreflect.MessageDescriptor, extensions extensionMap) *TypeDescription {
+ msgType := dynamicpb.NewMessageType(desc)
+ msgZero := dynamicpb.NewMessage(desc)
+ fieldMap := map[string]*FieldDescription{}
+ fields := desc.Fields()
+ for i := 0; i < fields.Len(); i++ {
+ f := fields.Get(i)
+ fieldMap[string(f.Name())] = newFieldDescription(f)
+ }
+ return &TypeDescription{
+ typeName: typeName,
+ desc: desc,
+ msgType: msgType,
+ fieldMap: fieldMap,
+ extensions: extensions,
+ reflectType: reflectTypeOf(msgZero),
+ zeroMsg: zeroValueOf(msgZero),
+ }
+}
+
+// TypeDescription is a collection of type metadata relevant to expression
+// checking and evaluation.
+type TypeDescription struct {
+ typeName string
+ desc protoreflect.MessageDescriptor
+ msgType protoreflect.MessageType
+ fieldMap map[string]*FieldDescription
+ extensions extensionMap
+ reflectType reflect.Type
+ zeroMsg proto.Message
+}
+
+// Copy copies the type description with updated references to the Db.
+func (td *TypeDescription) Copy(pbdb *Db) *TypeDescription {
+ return &TypeDescription{
+ typeName: td.typeName,
+ desc: td.desc,
+ msgType: td.msgType,
+ fieldMap: td.fieldMap,
+ extensions: pbdb.extensions,
+ reflectType: td.reflectType,
+ zeroMsg: td.zeroMsg,
+ }
+}
+
+// FieldMap returns a string field name to FieldDescription map.
+func (td *TypeDescription) FieldMap() map[string]*FieldDescription {
+ return td.fieldMap
+}
+
+// FieldByName returns (FieldDescription, true) if the field name is declared within the type.
+func (td *TypeDescription) FieldByName(name string) (*FieldDescription, bool) {
+ fd, found := td.fieldMap[name]
+ if found {
+ return fd, true
+ }
+ extFieldMap, found := td.extensions[td.typeName]
+ if !found {
+ return nil, false
+ }
+ fd, found = extFieldMap[name]
+ return fd, found
+}
+
+// MaybeUnwrap accepts a proto message as input and unwraps it to a primitive CEL type if possible.
+//
+// This method returns the unwrapped value and 'true', else the original value and 'false'.
+func (td *TypeDescription) MaybeUnwrap(msg proto.Message) (any, bool, error) {
+ return unwrap(td, msg)
+}
+
+// Name returns the fully-qualified name of the type.
+func (td *TypeDescription) Name() string {
+ return string(td.desc.FullName())
+}
+
+// New returns a mutable proto message.
+func (td *TypeDescription) New() protoreflect.Message {
+ return td.msgType.New()
+}
+
+// ReflectType returns the Golang reflect.Type for this type.
+func (td *TypeDescription) ReflectType() reflect.Type {
+ return td.reflectType
+}
+
+// Zero returns the zero proto.Message value for this type.
+func (td *TypeDescription) Zero() proto.Message {
+ return td.zeroMsg
+}
+
+// newFieldDescription creates a new field description from a protoreflect.FieldDescriptor.
+func newFieldDescription(fieldDesc protoreflect.FieldDescriptor) *FieldDescription {
+ var reflectType reflect.Type
+ var zeroMsg proto.Message
+ switch fieldDesc.Kind() {
+ case protoreflect.EnumKind:
+ reflectType = reflectTypeOf(protoreflect.EnumNumber(0))
+ case protoreflect.GroupKind, protoreflect.MessageKind:
+ zeroMsg = dynamicpb.NewMessage(fieldDesc.Message())
+ reflectType = reflectTypeOf(zeroMsg)
+ default:
+ reflectType = reflectTypeOf(fieldDesc.Default().Interface())
+ if fieldDesc.IsList() {
+ var elemValue protoreflect.Value
+ if fieldDesc.IsExtension() {
+ et := dynamicpb.NewExtensionType(fieldDesc)
+ elemValue = et.New().List().NewElement()
+ } else {
+ parentMsgType := fieldDesc.ContainingMessage()
+ parentMsg := dynamicpb.NewMessage(parentMsgType)
+ listField := parentMsg.NewField(fieldDesc).List()
+ elemValue = listField.NewElement()
+ }
+ elem := elemValue.Interface()
+ switch elemType := elem.(type) {
+ case protoreflect.Message:
+ elem = elemType.Interface()
+ }
+ reflectType = reflectTypeOf(elem)
+ }
+ }
+ // Ensure the list type is appropriately reflected as a Go-native list.
+ if fieldDesc.IsList() {
+ reflectType = reflect.SliceOf(reflectType)
+ }
+ var keyType, valType *FieldDescription
+ if fieldDesc.IsMap() {
+ keyType = newFieldDescription(fieldDesc.MapKey())
+ valType = newFieldDescription(fieldDesc.MapValue())
+ }
+ return &FieldDescription{
+ desc: fieldDesc,
+ KeyType: keyType,
+ ValueType: valType,
+ reflectType: reflectType,
+ zeroMsg: zeroValueOf(zeroMsg),
+ }
+}
+
+// FieldDescription holds metadata related to fields declared within a type.
+type FieldDescription struct {
+ // KeyType holds the key FieldDescription for map fields.
+ KeyType *FieldDescription
+ // ValueType holds the value FieldDescription for map fields.
+ ValueType *FieldDescription
+
+ desc protoreflect.FieldDescriptor
+ reflectType reflect.Type
+ zeroMsg proto.Message
+}
+
+// CheckedType returns the type-definition used at type-check time.
+func (fd *FieldDescription) CheckedType() *exprpb.Type {
+ if fd.desc.IsMap() {
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_MapType_{
+ MapType: &exprpb.Type_MapType{
+ KeyType: fd.KeyType.typeDefToType(),
+ ValueType: fd.ValueType.typeDefToType(),
+ },
+ },
+ }
+ }
+ if fd.desc.IsList() {
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_ListType_{
+ ListType: &exprpb.Type_ListType{
+ ElemType: fd.typeDefToType()}}}
+ }
+ return fd.typeDefToType()
+}
+
+// Descriptor returns the protoreflect.FieldDescriptor for this type.
+func (fd *FieldDescription) Descriptor() protoreflect.FieldDescriptor {
+ return fd.desc
+}
+
+// IsSet returns whether the field is set on the target value, per the proto presence conventions
+// of proto2 or proto3 accordingly.
+//
+// This function implements the FieldType.IsSet function contract which can be used to operate on
+// more than just protobuf field accesses; however, the target here must be a protobuf.Message.
+func (fd *FieldDescription) IsSet(target any) bool {
+ switch v := target.(type) {
+ case proto.Message:
+ pbRef := v.ProtoReflect()
+ pbDesc := pbRef.Descriptor()
+ if pbDesc == fd.desc.ContainingMessage() {
+ // When the target protobuf shares the same message descriptor instance as the field
+ // descriptor, use the cached field descriptor value.
+ return pbRef.Has(fd.desc)
+ }
+ // Otherwise, fall back to a dynamic lookup of the field descriptor from the target
+ // instance, as an attempt to use the cached field descriptor will result in a panic.
+ return pbRef.Has(pbDesc.Fields().ByName(protoreflect.Name(fd.Name())))
+ default:
+ return false
+ }
+}
+
+// GetFrom returns the accessor method associated with the field on the proto generated struct.
+//
+// If the field is not set, the proto default value is returned instead.
+//
+// This function implements the FieldType.GetFrom function contract which can be used to operate
+// on more than just protobuf field accesses; however, the target here must be a protobuf.Message.
+func (fd *FieldDescription) GetFrom(target any) (any, error) {
+ v, ok := target.(proto.Message)
+ if !ok {
+ return nil, fmt.Errorf("unsupported field selection target: (%T)%v", target, target)
+ }
+ pbRef := v.ProtoReflect()
+ pbDesc := pbRef.Descriptor()
+ var fieldVal any
+ if pbDesc == fd.desc.ContainingMessage() {
+ // When the target protobuf shares the same message descriptor instance as the field
+ // descriptor, use the cached field descriptor value.
+ fieldVal = pbRef.Get(fd.desc).Interface()
+ } else {
+ // Otherwise, fall back to a dynamic lookup of the field descriptor from the target
+ // instance, as an attempt to use the cached field descriptor will result in a panic.
+ fieldVal = pbRef.Get(pbDesc.Fields().ByName(protoreflect.Name(fd.Name()))).Interface()
+ }
+ switch fv := fieldVal.(type) {
+ // Fast-path return for primitive types.
+ case bool, []byte, float32, float64, int32, int64, string, uint32, uint64, protoreflect.List:
+ return fv, nil
+ case protoreflect.EnumNumber:
+ return int64(fv), nil
+ case protoreflect.Map:
+ // Return a wrapper around the protobuf-reflected Map types which carries additional
+ // information about the key and value definitions of the map.
+ return &Map{Map: fv, KeyType: fd.KeyType, ValueType: fd.ValueType}, nil
+ case protoreflect.Message:
+ // Make sure to unwrap well-known protobuf types before returning.
+ unwrapped, _, err := fd.MaybeUnwrapDynamic(fv)
+ return unwrapped, err
+ default:
+ return fv, nil
+ }
+}
+
+// IsEnum returns true if the field type refers to an enum value.
+func (fd *FieldDescription) IsEnum() bool {
+ return fd.ProtoKind() == protoreflect.EnumKind
+}
+
+// IsMap returns true if the field is of map type.
+func (fd *FieldDescription) IsMap() bool {
+ return fd.desc.IsMap()
+}
+
+// IsMessage returns true if the field is of message type.
+func (fd *FieldDescription) IsMessage() bool {
+ kind := fd.ProtoKind()
+ return kind == protoreflect.MessageKind || kind == protoreflect.GroupKind
+}
+
+// IsOneof returns true if the field is declared within a oneof block.
+func (fd *FieldDescription) IsOneof() bool {
+ return fd.desc.ContainingOneof() != nil
+}
+
+// IsList returns true if the field is a repeated value.
+//
+// This method will also return true for map values, so check whether the
+// field is also a map.
+func (fd *FieldDescription) IsList() bool {
+ return fd.desc.IsList()
+}
+
+// MaybeUnwrapDynamic takes the reflected protoreflect.Message and determines whether the
+// value can be unwrapped to a more primitive CEL type.
+//
+// This function returns the unwrapped value and 'true' on success, or the original value
+// and 'false' otherwise.
+func (fd *FieldDescription) MaybeUnwrapDynamic(msg protoreflect.Message) (any, bool, error) {
+ return unwrapDynamic(fd, msg)
+}
+
+// Name returns the name of the field as declared within the proto message.
+func (fd *FieldDescription) Name() string {
+ return string(fd.desc.Name())
+}
+
+// ProtoKind returns the protobuf reflected kind of the field.
+func (fd *FieldDescription) ProtoKind() protoreflect.Kind {
+ return fd.desc.Kind()
+}
+
+// ReflectType returns the Golang reflect.Type for this field.
+func (fd *FieldDescription) ReflectType() reflect.Type {
+ return fd.reflectType
+}
+
+// String returns the fully qualified name of the field within its type as well as whether the
+// field occurs within a oneof.
+func (fd *FieldDescription) String() string {
+ return fmt.Sprintf("%v.%s `oneof=%t`", fd.desc.ContainingMessage().FullName(), fd.Name(), fd.IsOneof())
+}
+
+// Zero returns the zero value for the protobuf message represented by this field.
+//
+// If the field is not a proto.Message type, the zero value is nil.
+func (fd *FieldDescription) Zero() proto.Message {
+ return fd.zeroMsg
+}
+
+func (fd *FieldDescription) typeDefToType() *exprpb.Type {
+ if fd.IsMessage() {
+ msgType := string(fd.desc.Message().FullName())
+ if wk, found := CheckedWellKnowns[msgType]; found {
+ return wk
+ }
+ return checkedMessageType(msgType)
+ }
+ if fd.IsEnum() {
+ return checkedInt
+ }
+ return CheckedPrimitives[fd.ProtoKind()]
+}
+
+// Map wraps the protoreflect.Map object with a key and value FieldDescription for use in
+// retrieving individual elements within CEL value data types.
+type Map struct {
+ protoreflect.Map
+ KeyType *FieldDescription
+ ValueType *FieldDescription
+}
+
+func checkedMessageType(name string) *exprpb.Type {
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_MessageType{MessageType: name}}
+}
+
+func checkedPrimitive(primitive exprpb.Type_PrimitiveType) *exprpb.Type {
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_Primitive{Primitive: primitive}}
+}
+
+func checkedWellKnown(wellKnown exprpb.Type_WellKnownType) *exprpb.Type {
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_WellKnown{WellKnown: wellKnown}}
+}
+
+func checkedWrap(t *exprpb.Type) *exprpb.Type {
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_Wrapper{Wrapper: t.GetPrimitive()}}
+}
+
+// unwrap unwraps the provided proto.Message value, potentially based on the description if the
+// input message is a *dynamicpb.Message which obscures the typing information from Go.
+//
+// Returns the unwrapped value and 'true' if unwrapped, otherwise the input value and 'false'.
+func unwrap(desc description, msg proto.Message) (any, bool, error) {
+ switch v := msg.(type) {
+ case *anypb.Any:
+ dynMsg, err := v.UnmarshalNew()
+ if err != nil {
+ return v, false, err
+ }
+ return unwrapDynamic(desc, dynMsg.ProtoReflect())
+ case *dynamicpb.Message:
+ return unwrapDynamic(desc, v)
+ case *dpb.Duration:
+ return v.AsDuration(), true, nil
+ case *tpb.Timestamp:
+ return v.AsTime(), true, nil
+ case *structpb.Value:
+ switch v.GetKind().(type) {
+ case *structpb.Value_BoolValue:
+ return v.GetBoolValue(), true, nil
+ case *structpb.Value_ListValue:
+ return v.GetListValue(), true, nil
+ case *structpb.Value_NullValue:
+ return structpb.NullValue_NULL_VALUE, true, nil
+ case *structpb.Value_NumberValue:
+ return v.GetNumberValue(), true, nil
+ case *structpb.Value_StringValue:
+ return v.GetStringValue(), true, nil
+ case *structpb.Value_StructValue:
+ return v.GetStructValue(), true, nil
+ default:
+ return structpb.NullValue_NULL_VALUE, true, nil
+ }
+ case *wrapperspb.BoolValue:
+ if v == nil {
+ return nil, true, nil
+ }
+ return v.GetValue(), true, nil
+ case *wrapperspb.BytesValue:
+ if v == nil {
+ return nil, true, nil
+ }
+ return v.GetValue(), true, nil
+ case *wrapperspb.DoubleValue:
+ if v == nil {
+ return nil, true, nil
+ }
+ return v.GetValue(), true, nil
+ case *wrapperspb.FloatValue:
+ if v == nil {
+ return nil, true, nil
+ }
+ return float64(v.GetValue()), true, nil
+ case *wrapperspb.Int32Value:
+ if v == nil {
+ return nil, true, nil
+ }
+ return int64(v.GetValue()), true, nil
+ case *wrapperspb.Int64Value:
+ if v == nil {
+ return nil, true, nil
+ }
+ return v.GetValue(), true, nil
+ case *wrapperspb.StringValue:
+ if v == nil {
+ return nil, true, nil
+ }
+ return v.GetValue(), true, nil
+ case *wrapperspb.UInt32Value:
+ if v == nil {
+ return nil, true, nil
+ }
+ return uint64(v.GetValue()), true, nil
+ case *wrapperspb.UInt64Value:
+ if v == nil {
+ return nil, true, nil
+ }
+ return v.GetValue(), true, nil
+ }
+ return msg, false, nil
+}
+
+// unwrapDynamic unwraps a reflected protobuf Message value.
+//
+// Returns the unwrapped value and 'true' if unwrapped, otherwise the input value and 'false'.
+func unwrapDynamic(desc description, refMsg protoreflect.Message) (any, bool, error) {
+ msg := refMsg.Interface()
+ if !refMsg.IsValid() {
+ msg = desc.Zero()
+ }
+ // In order to ensure that these wrapped types match the expectations of the CEL type system,
+ // the dynamicpb.Message must be merged with a protobuf instance of the well-known type value.
+ typeName := string(refMsg.Descriptor().FullName())
+ switch typeName {
+ case "google.protobuf.Any":
+ // Note, Any values require further unwrapping; however, this unwrapping may or may not
+ // be to a well-known type. If the unwrapped value is a well-known type it will be further
+ // unwrapped before being returned to the caller. Otherwise, the dynamic protobuf object
+ // represented by the Any will be returned.
+ unwrappedAny := &anypb.Any{}
+ err := Merge(unwrappedAny, msg)
+ if err != nil {
+ return nil, false, fmt.Errorf("unwrap dynamic field failed: %v", err)
+ }
+ dynMsg, err := unwrappedAny.UnmarshalNew()
+ if err != nil {
+ // Allow the error to move further up the stack as it should result in a type
+ // conversion error if the caller does not recover it somehow.
+ return nil, false, fmt.Errorf("unmarshal dynamic any failed: %v", err)
+ }
+ // Attempt to unwrap the dynamic type, otherwise return the dynamic message.
+ unwrapped, nested, err := unwrapDynamic(desc, dynMsg.ProtoReflect())
+ if err == nil && nested {
+ return unwrapped, true, nil
+ }
+ return dynMsg, true, err
+ case "google.protobuf.BoolValue",
+ "google.protobuf.BytesValue",
+ "google.protobuf.DoubleValue",
+ "google.protobuf.FloatValue",
+ "google.protobuf.Int32Value",
+ "google.protobuf.Int64Value",
+ "google.protobuf.StringValue",
+ "google.protobuf.UInt32Value",
+ "google.protobuf.UInt64Value":
+ // The msg value is ignored when dealing with wrapper types as they have a null or value
+ // behavior, rather than the standard zero value behavior of other proto message types.
+ if !refMsg.IsValid() {
+ return structpb.NullValue_NULL_VALUE, true, nil
+ }
+ valueField := refMsg.Descriptor().Fields().ByName("value")
+ return refMsg.Get(valueField).Interface(), true, nil
+ case "google.protobuf.Duration":
+ unwrapped := &dpb.Duration{}
+ err := Merge(unwrapped, msg)
+ if err != nil {
+ return nil, false, err
+ }
+ return unwrapped.AsDuration(), true, nil
+ case "google.protobuf.ListValue":
+ unwrapped := &structpb.ListValue{}
+ err := Merge(unwrapped, msg)
+ if err != nil {
+ return nil, false, err
+ }
+ return unwrapped, true, nil
+ case "google.protobuf.NullValue":
+ return structpb.NullValue_NULL_VALUE, true, nil
+ case "google.protobuf.Struct":
+ unwrapped := &structpb.Struct{}
+ err := Merge(unwrapped, msg)
+ if err != nil {
+ return nil, false, err
+ }
+ return unwrapped, true, nil
+ case "google.protobuf.Timestamp":
+ unwrapped := &tpb.Timestamp{}
+ err := Merge(unwrapped, msg)
+ if err != nil {
+ return nil, false, err
+ }
+ return unwrapped.AsTime(), true, nil
+ case "google.protobuf.Value":
+ unwrapped := &structpb.Value{}
+ err := Merge(unwrapped, msg)
+ if err != nil {
+ return nil, false, err
+ }
+ return unwrap(desc, unwrapped)
+ }
+ return msg, false, nil
+}
+
+// reflectTypeOf intercepts the reflect.Type call to ensure that dynamicpb.Message types preserve
+// well-known protobuf reflected types expected by the CEL type system.
+func reflectTypeOf(val any) reflect.Type {
+ switch v := val.(type) {
+ case proto.Message:
+ return reflect.TypeOf(zeroValueOf(v))
+ default:
+ return reflect.TypeOf(v)
+ }
+}
+
+// zeroValueOf will return the strongest possible proto.Message representing the default protobuf
+// message value of the input msg type.
+func zeroValueOf(msg proto.Message) proto.Message {
+ if msg == nil {
+ return nil
+ }
+ typeName := string(msg.ProtoReflect().Descriptor().FullName())
+ zeroVal, found := zeroValueMap[typeName]
+ if found {
+ return zeroVal
+ }
+ return msg
+}
+
+var (
+ jsonValueTypeURL = "types.googleapis.com/google.protobuf.Value"
+
+ zeroValueMap = map[string]proto.Message{
+ "google.protobuf.Any": &anypb.Any{TypeUrl: jsonValueTypeURL},
+ "google.protobuf.Duration": &dpb.Duration{},
+ "google.protobuf.ListValue": &structpb.ListValue{},
+ "google.protobuf.Struct": &structpb.Struct{},
+ "google.protobuf.Timestamp": &tpb.Timestamp{},
+ "google.protobuf.Value": &structpb.Value{},
+ "google.protobuf.BoolValue": wrapperspb.Bool(false),
+ "google.protobuf.BytesValue": wrapperspb.Bytes([]byte{}),
+ "google.protobuf.DoubleValue": wrapperspb.Double(0.0),
+ "google.protobuf.FloatValue": wrapperspb.Float(0.0),
+ "google.protobuf.Int32Value": wrapperspb.Int32(0),
+ "google.protobuf.Int64Value": wrapperspb.Int64(0),
+ "google.protobuf.StringValue": wrapperspb.String(""),
+ "google.protobuf.UInt32Value": wrapperspb.UInt32(0),
+ "google.protobuf.UInt64Value": wrapperspb.UInt64(0),
+ }
+)
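
A minimal sketch of the unwrap behavior above: well-known protobuf types unwrap to native Go values, so a google.protobuf.Duration surfaces as a time.Duration rather than as a message.

```go
package main

import (
	"fmt"
	"time"

	"github.com/google/cel-go/common/types/pb"
	dpb "google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	td, _ := pb.DefaultDb.DescribeType("google.protobuf.Duration")
	// unwrap maps *durationpb.Duration to its AsDuration() value.
	val, unwrapped, err := td.MaybeUnwrap(dpb.New(90 * time.Second))
	fmt.Println(val, unwrapped, err) // 1m30s true <nil>
}
```
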
diff --git a/vendor/github.com/google/cel-go/common/types/provider.go b/vendor/github.com/google/cel-go/common/types/provider.go
new file mode 100644
index 000000000..936a4e28b
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/provider.go
@@ -0,0 +1,766 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "reflect"
+ "time"
+
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+
+ "github.com/google/cel-go/common/types/pb"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ dpb "google.golang.org/protobuf/types/known/durationpb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ tpb "google.golang.org/protobuf/types/known/timestamppb"
+)
+
+// Adapter converts native Go values of varying type and complexity to equivalent CEL values.
+type Adapter = ref.TypeAdapter
+
+// Provider specifies functions for creating new object instances and for resolving
+// enum values by name.
+type Provider interface {
+ // EnumValue returns the numeric value of the given enum value name.
+ EnumValue(enumName string) ref.Val
+
+ // FindIdent takes a qualified identifier name and returns a ref.Val if one exists.
+ FindIdent(identName string) (ref.Val, bool)
+
+ // FindStructType returns the Type given a qualified type name.
+ //
+ // For historical reasons, only struct types are expected to be returned through this
+ // method, and the type values are expected to be wrapped in a TypeType instance using
+ // TypeTypeWithParam().
+ //
+ // Returns false if not found.
+ FindStructType(structType string) (*Type, bool)
+
+ // FindStructFieldNames returns the field names associated with the type, if the type
+ // is found.
+ FindStructFieldNames(structType string) ([]string, bool)
+
+ // FindStructFieldType returns the field type for a checked type value. Returns
+ // false if the field could not be found.
+ FindStructFieldType(structType, fieldName string) (*FieldType, bool)
+
+ // NewValue creates a new type value from a qualified name and map of field
+ // name to value.
+ //
+ // Note, for each value, the Val.ConvertToNative function will be invoked
+ // to convert the Val to the field's native type. If an error occurs during
+ // conversion, the NewValue will be a types.Err.
+ NewValue(structType string, fields map[string]ref.Val) ref.Val
+}
+
+// FieldType represents a field's type value and whether that field supports presence detection.
+type FieldType struct {
+ // Type of the field as a CEL native type value.
+ Type *Type
+
+ // IsSet indicates whether the field is set on an input object.
+ IsSet ref.FieldTester
+
+ // GetFrom retrieves the field value on the input object, if set.
+ GetFrom ref.FieldGetter
+}
+
+// Registry provides type information for a set of registered types.
+type Registry struct {
+ revTypeMap map[string]*Type
+ pbdb *pb.Db
+}
+
+// NewRegistry accepts a list of proto message instances and returns a type
+// provider which can create new instances of the provided message or any
+// message that proto depends upon in its FileDescriptor.
+func NewRegistry(types ...proto.Message) (*Registry, error) {
+ p := &Registry{
+ revTypeMap: make(map[string]*Type),
+ pbdb: pb.NewDb(),
+ }
+ err := p.RegisterType(
+ BoolType,
+ BytesType,
+ DoubleType,
+ DurationType,
+ IntType,
+ ListType,
+ MapType,
+ NullType,
+ StringType,
+ TimestampType,
+ TypeType,
+ UintType)
+ if err != nil {
+ return nil, err
+ }
+ // This block ensures that the well-known protobuf types are registered by default.
+ for _, fd := range p.pbdb.FileDescriptions() {
+ err = p.registerAllTypes(fd)
+ if err != nil {
+ return nil, err
+ }
+ }
+ for _, msgType := range types {
+ err = p.RegisterMessage(msgType)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return p, nil
+}
+
+// NewEmptyRegistry returns a registry which is completely unconfigured.
+func NewEmptyRegistry() *Registry {
+ return &Registry{
+ revTypeMap: make(map[string]*Type),
+ pbdb: pb.NewDb(),
+ }
+}
+
+// Copy copies the current state of the registry into its own memory space.
+func (p *Registry) Copy() *Registry {
+ copy := &Registry{
+ revTypeMap: make(map[string]*Type),
+ pbdb: p.pbdb.Copy(),
+ }
+ for k, v := range p.revTypeMap {
+ copy.revTypeMap[k] = v
+ }
+ return copy
+}
+
+// EnumValue returns the numeric value of the given enum value name.
+func (p *Registry) EnumValue(enumName string) ref.Val {
+ enumVal, found := p.pbdb.DescribeEnum(enumName)
+ if !found {
+ return NewErr("unknown enum name '%s'", enumName)
+ }
+ return Int(enumVal.Value())
+}
+
+// FindFieldType returns the field type for a checked type value. Returns false if
+// the field could not be found.
+//
+// Deprecated: use FindStructFieldType
+func (p *Registry) FindFieldType(structType, fieldName string) (*ref.FieldType, bool) {
+ msgType, found := p.pbdb.DescribeType(structType)
+ if !found {
+ return nil, false
+ }
+ field, found := msgType.FieldByName(fieldName)
+ if !found {
+ return nil, false
+ }
+ return &ref.FieldType{
+ Type: field.CheckedType(),
+ IsSet: field.IsSet,
+ GetFrom: field.GetFrom}, true
+}
+
+// FindStructFieldNames returns the set of field names for the given struct type,
+// if the type exists in the registry.
+func (p *Registry) FindStructFieldNames(structType string) ([]string, bool) {
+ msgType, found := p.pbdb.DescribeType(structType)
+ if !found {
+ return []string{}, false
+ }
+ fieldMap := msgType.FieldMap()
+ fields := make([]string, len(fieldMap))
+ idx := 0
+ for f := range fieldMap {
+ fields[idx] = f
+ idx++
+ }
+ return fields, true
+}
+
+// FindStructFieldType returns the field type for a checked type value. Returns
+// false if the field could not be found.
+func (p *Registry) FindStructFieldType(structType, fieldName string) (*FieldType, bool) {
+ msgType, found := p.pbdb.DescribeType(structType)
+ if !found {
+ return nil, false
+ }
+ field, found := msgType.FieldByName(fieldName)
+ if !found {
+ return nil, false
+ }
+ return &FieldType{
+ Type: fieldDescToCELType(field),
+ IsSet: field.IsSet,
+ GetFrom: field.GetFrom}, true
+}
+
+// FindIdent takes a qualified identifier name and returns a ref.Val if one exists.
+func (p *Registry) FindIdent(identName string) (ref.Val, bool) {
+ if t, found := p.revTypeMap[identName]; found {
+ return t, true
+ }
+ if enumVal, found := p.pbdb.DescribeEnum(identName); found {
+ return Int(enumVal.Value()), true
+ }
+ return nil, false
+}
+
+// FindType looks up the Type given a qualified typeName. Returns false if not found.
+//
+// Deprecated: use FindStructType
+func (p *Registry) FindType(structType string) (*exprpb.Type, bool) {
+ if _, found := p.pbdb.DescribeType(structType); !found {
+ return nil, false
+ }
+ if structType != "" && structType[0] == '.' {
+ structType = structType[1:]
+ }
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_Type{
+ Type: &exprpb.Type{
+ TypeKind: &exprpb.Type_MessageType{
+ MessageType: structType}}}}, true
+}
+
+// FindStructType returns the Type given a qualified type name.
+//
+// For historical reasons, only struct types are expected to be returned through this
+// method, and the type values are expected to be wrapped in a TypeType instance using
+// TypeTypeWithParam().
+//
+// Returns false if not found.
+func (p *Registry) FindStructType(structType string) (*Type, bool) {
+ if _, found := p.pbdb.DescribeType(structType); !found {
+ return nil, false
+ }
+ if structType != "" && structType[0] == '.' {
+ structType = structType[1:]
+ }
+ return NewTypeTypeWithParam(NewObjectType(structType)), true
+}
+
+// NewValue creates a new type value from a qualified name and map of field
+// name to value.
+//
+// Note, for each value, the Val.ConvertToNative function will be invoked
+// to convert the Val to the field's native type. If an error occurs during
+// conversion, the NewValue will be a types.Err.
+func (p *Registry) NewValue(structType string, fields map[string]ref.Val) ref.Val {
+ td, found := p.pbdb.DescribeType(structType)
+ if !found {
+ return NewErr("unknown type '%s'", structType)
+ }
+ msg := td.New()
+ fieldMap := td.FieldMap()
+ for name, value := range fields {
+ field, found := fieldMap[name]
+ if !found {
+ return NewErr("no such field: %s", name)
+ }
+ err := msgSetField(msg, field, value)
+ if err != nil {
+ return &Err{error: err}
+ }
+ }
+ return p.NativeToValue(msg.Interface())
+}
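
A hedged usage sketch for NewValue: field names are the proto field names, and each ref.Val is converted to the field's native type before being set, per the doc comment above.

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/common/types"
	"github.com/google/cel-go/common/types/ref"
	tspb "google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	reg, err := types.NewRegistry(&tspb.Timestamp{})
	if err != nil {
		panic(err)
	}
	// types.Int is converted to the int64 "seconds" field; the resulting
	// Timestamp message is then unwrapped into a CEL timestamp value.
	val := reg.NewValue("google.protobuf.Timestamp", map[string]ref.Val{
		"seconds": types.Int(1700000000),
	})
	fmt.Println(val) // the unwrapped types.Timestamp
}
```
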
+
+// RegisterDescriptor registers the contents of a protocol buffer `FileDescriptor`.
+func (p *Registry) RegisterDescriptor(fileDesc protoreflect.FileDescriptor) error {
+ fd, err := p.pbdb.RegisterDescriptor(fileDesc)
+ if err != nil {
+ return err
+ }
+ return p.registerAllTypes(fd)
+}
+
+// RegisterMessage registers a protocol buffer message and its dependencies.
+func (p *Registry) RegisterMessage(message proto.Message) error {
+ fd, err := p.pbdb.RegisterMessage(message)
+ if err != nil {
+ return err
+ }
+ return p.registerAllTypes(fd)
+}
+
+// RegisterType registers a type value with the provider which ensures the provider is aware of how to
+// map the type to an identifier.
+//
+// If the `ref.Type` value is a `*types.Type` it will be registered directly by its runtime type name.
+// If the `ref.Type` value is not a `*types.Type` instance, a `*types.Type` instance which reflects the
+// traits present on the input and the runtime type name. By default this foreign type will be treated
+// as a types.StructKind. To avoid potential issues where the `ref.Type` values does not match the
+// generated `*types.Type` instance, consider always using the `*types.Type` to represent type extensions
+// to CEL, even when they're not based on protobuf types.
+func (p *Registry) RegisterType(types ...ref.Type) error {
+ for _, t := range types {
+ celType := maybeForeignType(t)
+ existing, found := p.revTypeMap[t.TypeName()]
+ if !found {
+ p.revTypeMap[t.TypeName()] = celType
+ continue
+ }
+ if !existing.IsEquivalentType(celType) {
+ return fmt.Errorf("type registration conflict. found: %v, input: %v", existing, celType)
+ }
+ if existing.traitMask != celType.traitMask {
+ return fmt.Errorf(
+ "type registered with conflicting traits: %v with traits %v, input: %v",
+ existing.TypeName(), existing.traitMask, celType.traitMask)
+ }
+ }
+ return nil
+}
+
+// NativeToValue converts various "native" types to ref.Val with this specific implementation
+// providing support for custom proto-based types.
+//
+// This method should be the inverse of ref.Val.ConvertToNative.
+func (p *Registry) NativeToValue(value any) ref.Val {
+ if val, found := nativeToValue(p, value); found {
+ return val
+ }
+ switch v := value.(type) {
+ case proto.Message:
+ typeName := string(v.ProtoReflect().Descriptor().FullName())
+ td, found := p.pbdb.DescribeType(typeName)
+ if !found {
+ return NewErr("unknown type: '%s'", typeName)
+ }
+ unwrapped, isUnwrapped, err := td.MaybeUnwrap(v)
+ if err != nil {
+ return UnsupportedRefValConversionErr(v)
+ }
+ if isUnwrapped {
+ return p.NativeToValue(unwrapped)
+ }
+ typeVal, found := p.FindIdent(typeName)
+ if !found {
+ return NewErr("unknown type: '%s'", typeName)
+ }
+ return NewObject(p, td, typeVal, v)
+ case *pb.Map:
+ return NewProtoMap(p, v)
+ case protoreflect.List:
+ return NewProtoList(p, v)
+ case protoreflect.Message:
+ return p.NativeToValue(v.Interface())
+ case protoreflect.Value:
+ return p.NativeToValue(v.Interface())
+ }
+ return UnsupportedRefValConversionErr(value)
+}
+
+func (p *Registry) registerAllTypes(fd *pb.FileDescription) error {
+ for _, typeName := range fd.GetTypeNames() {
+ // skip well-known type names since they're automatically sanitized
+ // during NewObjectType() calls.
+ if _, found := checkedWellKnowns[typeName]; found {
+ continue
+ }
+ err := p.RegisterType(NewObjectTypeValue(typeName))
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func fieldDescToCELType(field *pb.FieldDescription) *Type {
+ if field.IsMap() {
+ return NewMapType(
+ singularFieldDescToCELType(field.KeyType),
+ singularFieldDescToCELType(field.ValueType))
+ }
+ if field.IsList() {
+ return NewListType(singularFieldDescToCELType(field))
+ }
+ return singularFieldDescToCELType(field)
+}
+
+func singularFieldDescToCELType(field *pb.FieldDescription) *Type {
+ if field.IsMessage() {
+ return NewObjectType(string(field.Descriptor().Message().FullName()))
+ }
+ if field.IsEnum() {
+ return IntType
+ }
+ return ProtoCELPrimitives[field.ProtoKind()]
+}
+
+// defaultTypeAdapter converts go native types to CEL values.
+type defaultTypeAdapter struct{}
+
+var (
+ // DefaultTypeAdapter adapts canonical CEL types from their equivalent Go values.
+ DefaultTypeAdapter = &defaultTypeAdapter{}
+)
+
+// NativeToValue implements the ref.TypeAdapter interface.
+func (a *defaultTypeAdapter) NativeToValue(value any) ref.Val {
+ if val, found := nativeToValue(a, value); found {
+ return val
+ }
+ return UnsupportedRefValConversionErr(value)
+}
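
For illustration, DefaultTypeAdapter applies the nativeToValue conversions below to plain Go values:

```go
package main

import (
	"fmt"
	"time"

	"github.com/google/cel-go/common/types"
)

func main() {
	a := types.DefaultTypeAdapter
	fmt.Println(a.NativeToValue("hello"))       // types.String: hello
	fmt.Println(a.NativeToValue(42))            // types.Int: 42
	fmt.Println(a.NativeToValue(2 * time.Hour)) // types.Duration: 2h0m0s
}
```
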
+
+// nativeToValue returns the converted (ref.Val, true) if a conversion is found,
+// otherwise (nil, false).
+func nativeToValue(a Adapter, value any) (ref.Val, bool) {
+ switch v := value.(type) {
+ case nil:
+ return NullValue, true
+ case *Bool:
+ if v != nil {
+ return *v, true
+ }
+ case *Bytes:
+ if v != nil {
+ return *v, true
+ }
+ case *Double:
+ if v != nil {
+ return *v, true
+ }
+ case *Int:
+ if v != nil {
+ return *v, true
+ }
+ case *String:
+ if v != nil {
+ return *v, true
+ }
+ case *Uint:
+ if v != nil {
+ return *v, true
+ }
+ case bool:
+ return Bool(v), true
+ case int:
+ return Int(v), true
+ case int32:
+ return Int(v), true
+ case int64:
+ return Int(v), true
+ case uint:
+ return Uint(v), true
+ case uint32:
+ return Uint(v), true
+ case uint64:
+ return Uint(v), true
+ case float32:
+ return Double(v), true
+ case float64:
+ return Double(v), true
+ case string:
+ return String(v), true
+ case *dpb.Duration:
+ return Duration{Duration: v.AsDuration()}, true
+ case time.Duration:
+ return Duration{Duration: v}, true
+ case *tpb.Timestamp:
+ return Timestamp{Time: v.AsTime()}, true
+ case time.Time:
+ return Timestamp{Time: v}, true
+ case *bool:
+ if v != nil {
+ return Bool(*v), true
+ }
+ case *float32:
+ if v != nil {
+ return Double(*v), true
+ }
+ case *float64:
+ if v != nil {
+ return Double(*v), true
+ }
+ case *int:
+ if v != nil {
+ return Int(*v), true
+ }
+ case *int32:
+ if v != nil {
+ return Int(*v), true
+ }
+ case *int64:
+ if v != nil {
+ return Int(*v), true
+ }
+ case *string:
+ if v != nil {
+ return String(*v), true
+ }
+ case *uint:
+ if v != nil {
+ return Uint(*v), true
+ }
+ case *uint32:
+ if v != nil {
+ return Uint(*v), true
+ }
+ case *uint64:
+ if v != nil {
+ return Uint(*v), true
+ }
+ case []byte:
+ return Bytes(v), true
+ // specializations for common list types.
+ case []string:
+ return NewStringList(a, v), true
+ case []ref.Val:
+ return NewRefValList(a, v), true
+ // specializations for common map types.
+ case map[string]string:
+ return NewStringStringMap(a, v), true
+ case map[string]any:
+ return NewStringInterfaceMap(a, v), true
+ case map[ref.Val]ref.Val:
+ return NewRefValMap(a, v), true
+ // additional specializations may be added upon request / need.
+ case *anypb.Any:
+ if v == nil {
+ return UnsupportedRefValConversionErr(v), true
+ }
+ unpackedAny, err := v.UnmarshalNew()
+ if err != nil {
+ return NewErr("anypb.UnmarshalNew() failed for type %q: %v", v.GetTypeUrl(), err), true
+ }
+ return a.NativeToValue(unpackedAny), true
+ case *structpb.NullValue, structpb.NullValue:
+ return NullValue, true
+ case *structpb.ListValue:
+ return NewJSONList(a, v), true
+ case *structpb.Struct:
+ return NewJSONStruct(a, v), true
+ case ref.Val:
+ return v, true
+ case protoreflect.EnumNumber:
+ return Int(v), true
+ case proto.Message:
+ if v == nil {
+ return UnsupportedRefValConversionErr(v), true
+ }
+ typeName := string(v.ProtoReflect().Descriptor().FullName())
+ td, found := pb.DefaultDb.DescribeType(typeName)
+ if !found {
+ return nil, false
+ }
+ val, unwrapped, err := td.MaybeUnwrap(v)
+ if err != nil {
+ return UnsupportedRefValConversionErr(v), true
+ }
+ if !unwrapped {
+ return nil, false
+ }
+ return a.NativeToValue(val), true
+ // Note: dynamicpb.Message implements the proto.Message _and_ protoreflect.Message interfaces
+ // which means that this case must appear after handling a proto.Message type.
+ case protoreflect.Message:
+ return a.NativeToValue(v.Interface()), true
+ default:
+ refValue := reflect.ValueOf(v)
+ if refValue.Kind() == reflect.Ptr {
+ if refValue.IsNil() {
+ return UnsupportedRefValConversionErr(v), true
+ }
+ refValue = refValue.Elem()
+ }
+ refKind := refValue.Kind()
+ switch refKind {
+ case reflect.Array, reflect.Slice:
+ if refValue.Type().Elem() == reflect.TypeOf(byte(0)) {
+ if refValue.CanAddr() {
+ return Bytes(refValue.Bytes()), true
+ }
+ tmp := reflect.New(refValue.Type())
+ tmp.Elem().Set(refValue)
+ return Bytes(tmp.Elem().Bytes()), true
+ }
+ return NewDynamicList(a, v), true
+ case reflect.Map:
+ return NewDynamicMap(a, v), true
+ // type aliases of primitive types cannot be asserted as the primitive type in
+ // a type switch, but rather need to be converted to the underlying primitive
+ // kind via reflection before being converted to a CEL representation.
+ case reflect.Bool:
+ boolType := reflect.TypeOf(false)
+ return Bool(refValue.Convert(boolType).Interface().(bool)), true
+ case reflect.Int:
+ intType := reflect.TypeOf(int(0))
+ return Int(refValue.Convert(intType).Interface().(int)), true
+ case reflect.Int8:
+ intType := reflect.TypeOf(int8(0))
+ return Int(refValue.Convert(intType).Interface().(int8)), true
+ case reflect.Int16:
+ intType := reflect.TypeOf(int16(0))
+ return Int(refValue.Convert(intType).Interface().(int16)), true
+ case reflect.Int32:
+ intType := reflect.TypeOf(int32(0))
+ return Int(refValue.Convert(intType).Interface().(int32)), true
+ case reflect.Int64:
+ intType := reflect.TypeOf(int64(0))
+ return Int(refValue.Convert(intType).Interface().(int64)), true
+ case reflect.Uint:
+ uintType := reflect.TypeOf(uint(0))
+ return Uint(refValue.Convert(uintType).Interface().(uint)), true
+ case reflect.Uint8:
+ uintType := reflect.TypeOf(uint8(0))
+ return Uint(refValue.Convert(uintType).Interface().(uint8)), true
+ case reflect.Uint16:
+ uintType := reflect.TypeOf(uint16(0))
+ return Uint(refValue.Convert(uintType).Interface().(uint16)), true
+ case reflect.Uint32:
+ uintType := reflect.TypeOf(uint32(0))
+ return Uint(refValue.Convert(uintType).Interface().(uint32)), true
+ case reflect.Uint64:
+ uintType := reflect.TypeOf(uint64(0))
+ return Uint(refValue.Convert(uintType).Interface().(uint64)), true
+ case reflect.Float32:
+ doubleType := reflect.TypeOf(float32(0))
+ return Double(refValue.Convert(doubleType).Interface().(float32)), true
+ case reflect.Float64:
+ doubleType := reflect.TypeOf(float64(0))
+ return Double(refValue.Convert(doubleType).Interface().(float64)), true
+ case reflect.String:
+ stringType := reflect.TypeOf("")
+ return String(refValue.Convert(stringType).Interface().(string)), true
+ }
+ }
+ return nil, false
+}
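+
+// Editor's note: behavior sketch for the reflection fallback above (editorial
+// addition, not upstream cel-go). A named alias of a primitive never matches the
+// type-switch cases, so it is converted through its reflect.Kind, while a typed
+// nil pointer reports that no conversion was found:
+//
+//	type level int32
+//	nativeToValue(a, level(3)) // (Int(3), true) via the reflect.Int32 case
+//	var p *string
+//	nativeToValue(a, p)        // (nil, false)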
+
+func msgSetField(target protoreflect.Message, field *pb.FieldDescription, val ref.Val) error {
+ if field.IsList() {
+ lv := target.NewField(field.Descriptor())
+ list, ok := val.(traits.Lister)
+ if !ok {
+ return unsupportedTypeConversionError(field, val)
+ }
+ err := msgSetListField(lv.List(), field, list)
+ if err != nil {
+ return err
+ }
+ target.Set(field.Descriptor(), lv)
+ return nil
+ }
+ if field.IsMap() {
+ mv := target.NewField(field.Descriptor())
+ mp, ok := val.(traits.Mapper)
+ if !ok {
+ return unsupportedTypeConversionError(field, val)
+ }
+ err := msgSetMapField(mv.Map(), field, mp)
+ if err != nil {
+ return err
+ }
+ target.Set(field.Descriptor(), mv)
+ return nil
+ }
+ v, err := val.ConvertToNative(field.ReflectType())
+ if err != nil {
+ return fieldTypeConversionError(field, err)
+ }
+ if v == nil {
+ return nil
+ }
+ switch pv := v.(type) {
+ case proto.Message:
+ v = pv.ProtoReflect()
+ }
+ target.Set(field.Descriptor(), protoreflect.ValueOf(v))
+ return nil
+}
+
+func msgSetListField(target protoreflect.List, listField *pb.FieldDescription, listVal traits.Lister) error {
+ elemReflectType := listField.ReflectType().Elem()
+ for i := Int(0); i < listVal.Size().(Int); i++ {
+ elem := listVal.Get(i)
+ elemVal, err := elem.ConvertToNative(elemReflectType)
+ if err != nil {
+ return fieldTypeConversionError(listField, err)
+ }
+ if elemVal == nil {
+ continue
+ }
+ switch ev := elemVal.(type) {
+ case proto.Message:
+ elemVal = ev.ProtoReflect()
+ }
+ target.Append(protoreflect.ValueOf(elemVal))
+ }
+ return nil
+}
+
+func msgSetMapField(target protoreflect.Map, mapField *pb.FieldDescription, mapVal traits.Mapper) error {
+ targetKeyType := mapField.KeyType.ReflectType()
+ targetValType := mapField.ValueType.ReflectType()
+ it := mapVal.Iterator()
+ for it.HasNext() == True {
+ key := it.Next()
+ val := mapVal.Get(key)
+ k, err := key.ConvertToNative(targetKeyType)
+ if err != nil {
+ return fieldTypeConversionError(mapField, err)
+ }
+ v, err := val.ConvertToNative(targetValType)
+ if err != nil {
+ return fieldTypeConversionError(mapField, err)
+ }
+ if v == nil {
+ continue
+ }
+ switch pv := v.(type) {
+ case proto.Message:
+ v = pv.ProtoReflect()
+ }
+ target.Set(protoreflect.ValueOf(k).MapKey(), protoreflect.ValueOf(v))
+ }
+ return nil
+}
+
+func unsupportedTypeConversionError(field *pb.FieldDescription, val ref.Val) error {
+ msgName := field.Descriptor().ContainingMessage().FullName()
+ return fmt.Errorf("unsupported field type for %v.%v: %v", msgName, field.Name(), val.Type())
+}
+
+func fieldTypeConversionError(field *pb.FieldDescription, err error) error {
+ msgName := field.Descriptor().ContainingMessage().FullName()
+ return fmt.Errorf("field type conversion error for %v.%v value type: %v", msgName, field.Name(), err)
+}
+
+var (
+ // ProtoCELPrimitives provides a map from the protoreflect Kind to the equivalent CEL type.
+ ProtoCELPrimitives = map[protoreflect.Kind]*Type{
+ protoreflect.BoolKind: BoolType,
+ protoreflect.BytesKind: BytesType,
+ protoreflect.DoubleKind: DoubleType,
+ protoreflect.FloatKind: DoubleType,
+ protoreflect.Int32Kind: IntType,
+ protoreflect.Int64Kind: IntType,
+ protoreflect.Sint32Kind: IntType,
+ protoreflect.Sint64Kind: IntType,
+ protoreflect.Uint32Kind: UintType,
+ protoreflect.Uint64Kind: UintType,
+ protoreflect.Fixed32Kind: UintType,
+ protoreflect.Fixed64Kind: UintType,
+ protoreflect.Sfixed32Kind: IntType,
+ protoreflect.Sfixed64Kind: IntType,
+ protoreflect.StringKind: StringType,
+ }
+)
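+
+// Editor's note (editorial addition, not upstream cel-go): every proto scalar
+// kind collapses onto a single CEL primitive, e.g.:
+//
+//	ProtoCELPrimitives[protoreflect.FloatKind]  // DoubleType: CEL has only double
+//	ProtoCELPrimitives[protoreflect.Sint64Kind] // IntType: all signed kinds map to int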
diff --git a/vendor/github.com/google/cel-go/common/types/ref/BUILD.bazel b/vendor/github.com/google/cel-go/common/types/ref/BUILD.bazel
new file mode 100644
index 000000000..79330c332
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/ref/BUILD.bazel
@@ -0,0 +1,20 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+package(
+ default_visibility = ["//visibility:public"],
+ licenses = ["notice"], # Apache 2.0
+)
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "provider.go",
+ "reference.go",
+ ],
+ importpath = "github.com/google/cel-go/common/types/ref",
+ deps = [
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
+ "@org_golang_google_protobuf//proto:go_default_library",
+ "@org_golang_google_protobuf//reflect/protoreflect:go_default_library",
+ ],
+)
diff --git a/vendor/github.com/google/cel-go/common/types/ref/provider.go b/vendor/github.com/google/cel-go/common/types/ref/provider.go
new file mode 100644
index 000000000..b9820023d
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/ref/provider.go
@@ -0,0 +1,102 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ref
+
+import (
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+// TypeProvider specifies functions for creating new object instances and for
+// resolving enum values by name.
+//
+// Deprecated: use types.Provider
+type TypeProvider interface {
+ // EnumValue returns the numeric value of the given enum value name.
+ EnumValue(enumName string) Val
+
+ // FindIdent takes a qualified identifier name and returns a Value if one exists.
+ FindIdent(identName string) (Val, bool)
+
+ // FindType looks up the Type given a qualified typeName. Returns false if not found.
+ FindType(typeName string) (*exprpb.Type, bool)
+
+ // FindFieldType returns the field type for a checked type value. Returns false if
+ // the field could not be found.
+ FindFieldType(messageType, fieldName string) (*FieldType, bool)
+
+ // NewValue creates a new type value from a qualified name and map of field name
+ // to value.
+ //
+ // Note, for each value, the Val.ConvertToNative function will be invoked to convert
+ // the Val to the field's native type. If an error occurs during conversion, the
+ // NewValue will be a types.Err.
+ NewValue(typeName string, fields map[string]Val) Val
+}
+
+// TypeAdapter converts native Go values of varying type and complexity to equivalent CEL values.
+//
+// Deprecated: use types.Adapter
+type TypeAdapter interface {
+ // NativeToValue converts the input `value` to a CEL `ref.Val`.
+ NativeToValue(value any) Val
+}
+
+// TypeRegistry allows third parties to add custom types to CEL. Not all `TypeProvider`
+// implementations support type-customization, so these features are optional. However, a
+// `TypeRegistry` should be a `TypeProvider` and a `TypeAdapter` to ensure that types
+// which are registered can be converted to CEL representations.
+//
+// Deprecated: use types.Registry
+type TypeRegistry interface {
+ TypeAdapter
+ TypeProvider
+
+ // RegisterDescriptor registers the contents of a protocol buffer `FileDescriptor`.
+ RegisterDescriptor(fileDesc protoreflect.FileDescriptor) error
+
+ // RegisterMessage registers a protocol buffer message and its dependencies.
+ RegisterMessage(message proto.Message) error
+
+ // RegisterType registers a type value with the provider which ensures the
+ // provider is aware of how to map the type to an identifier.
+ //
+ // If a type is provided more than once with an alternative definition, the
+ // call will result in an error.
+ RegisterType(types ...Type) error
+}
+
+// FieldType represents a field's type value and whether that field supports
+// presence detection.
+//
+// Deprecated: use types.FieldType
+type FieldType struct {
+ // Type of the field as a protobuf type value.
+ Type *exprpb.Type
+
+ // IsSet indicates whether the field is set on an input object.
+ IsSet FieldTester
+
+ // GetFrom retrieves the field value on the input object, if set.
+ GetFrom FieldGetter
+}
+
+// FieldTester is used to test field presence on an input object.
+type FieldTester func(target any) bool
+
+// FieldGetter is used to get the field value from an input object, if set.
+type FieldGetter func(target any) (any, error)
diff --git a/vendor/github.com/google/cel-go/common/types/ref/reference.go b/vendor/github.com/google/cel-go/common/types/ref/reference.go
new file mode 100644
index 000000000..e0d58145c
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/ref/reference.go
@@ -0,0 +1,63 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package ref contains the reference interfaces used throughout the types components.
+package ref
+
+import (
+ "reflect"
+)
+
+// Type interface indicates the name of a given type.
+type Type interface {
+ // HasTrait returns whether the type has a given trait associated with it.
+ //
+ // See common/types/traits/traits.go for a list of supported traits.
+ HasTrait(trait int) bool
+
+ // TypeName returns the qualified type name of the type.
+ //
+ // The type name is also used as the type's identifier name at type-check and interpretation time.
+ TypeName() string
+}
+
+// Val interface defines the functions supported by all expression values.
+// Val implementations may specialize the behavior of the value through the addition of traits.
+type Val interface {
+ // ConvertToNative converts the Value to a native Go struct according to the
+ // reflected type description, or error if the conversion is not feasible.
+ //
+ // The ConvertToNative method is intended to be used to support conversion between CEL types
+ // and native types during object creation expressions or by clients who need to adapt the
+ // returned CEL value into an equivalent Go value instance.
+ //
+ // When implementing or using ConvertToNative, the following guidelines apply:
+ // - Use ConvertToNative when marshalling CEL evaluation results to native types.
+ // - Do not use ConvertToNative within CEL extension functions.
+ // - Document whether your implementation supports non-CEL field types, such as Go or Protobuf.
+ ConvertToNative(typeDesc reflect.Type) (any, error)
+
+ // ConvertToType supports type conversions between CEL value types supported by the expression language.
+ ConvertToType(typeValue Type) Val
+
+ // Equal returns true if the `other` value has the same type and content as the implementing struct.
+ Equal(other Val) Val
+
+ // Type returns the TypeValue of the value.
+ Type() Type
+
+ // Value returns the raw value of the instance which may not be directly compatible with the expression
+ // language types.
+ Value() any
+}
diff --git a/vendor/github.com/google/cel-go/common/types/string.go b/vendor/github.com/google/cel-go/common/types/string.go
new file mode 100644
index 000000000..3a93743f2
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/string.go
@@ -0,0 +1,226 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/google/cel-go/common/overloads"
+ "github.com/google/cel-go/common/types/ref"
+
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
+)
+
+// String type implementation which supports addition, comparison, matching,
+// and size functions.
+type String string
+
+var (
+ stringOneArgOverloads = map[string]func(ref.Val, ref.Val) ref.Val{
+ overloads.Contains: StringContains,
+ overloads.EndsWith: StringEndsWith,
+ overloads.StartsWith: StringStartsWith,
+ }
+
+ stringWrapperType = reflect.TypeOf(&wrapperspb.StringValue{})
+)
+
+// Add implements traits.Adder.Add.
+func (s String) Add(other ref.Val) ref.Val {
+ otherString, ok := other.(String)
+ if !ok {
+ return MaybeNoSuchOverloadErr(other)
+ }
+ return s + otherString
+}
+
+// Compare implements traits.Comparer.Compare.
+func (s String) Compare(other ref.Val) ref.Val {
+ otherString, ok := other.(String)
+ if !ok {
+ return MaybeNoSuchOverloadErr(other)
+ }
+ return Int(strings.Compare(s.Value().(string), otherString.Value().(string)))
+}
+
+// ConvertToNative implements ref.Val.ConvertToNative.
+func (s String) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ switch typeDesc.Kind() {
+ case reflect.String:
+ return reflect.ValueOf(s).Convert(typeDesc).Interface(), nil
+ case reflect.Ptr:
+ switch typeDesc {
+ case anyValueType:
+ // Primitives must be wrapped before being set on an Any field.
+ return anypb.New(wrapperspb.String(string(s)))
+ case jsonValueType:
+ // Convert to a protobuf representation of a JSON String.
+ return structpb.NewStringValue(string(s)), nil
+ case stringWrapperType:
+ // Convert to a wrapperspb.StringValue.
+ return wrapperspb.String(string(s)), nil
+ }
+ if typeDesc.Elem().Kind() == reflect.String {
+ p := s.Value().(string)
+ return &p, nil
+ }
+ case reflect.Interface:
+ sv := s.Value()
+ if reflect.TypeOf(sv).Implements(typeDesc) {
+ return sv, nil
+ }
+ if reflect.TypeOf(s).Implements(typeDesc) {
+ return s, nil
+ }
+ }
+ return nil, fmt.Errorf(
+ "unsupported native conversion from string to '%v'", typeDesc)
+}
+
+// ConvertToType implements ref.Val.ConvertToType.
+func (s String) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case IntType:
+ if n, err := strconv.ParseInt(s.Value().(string), 10, 64); err == nil {
+ return Int(n)
+ }
+ case UintType:
+ if n, err := strconv.ParseUint(s.Value().(string), 10, 64); err == nil {
+ return Uint(n)
+ }
+ case DoubleType:
+ if n, err := strconv.ParseFloat(s.Value().(string), 64); err == nil {
+ return Double(n)
+ }
+ case BoolType:
+ if b, err := strconv.ParseBool(s.Value().(string)); err == nil {
+ return Bool(b)
+ }
+ case BytesType:
+ return Bytes(s)
+ case DurationType:
+ if d, err := time.ParseDuration(s.Value().(string)); err == nil {
+ return durationOf(d)
+ }
+ case TimestampType:
+ if t, err := time.Parse(time.RFC3339, s.Value().(string)); err == nil {
+ if t.Unix() < minUnixTime || t.Unix() > maxUnixTime {
+ return celErrTimestampOverflow
+ }
+ return timestampOf(t)
+ }
+ case StringType:
+ return s
+ case TypeType:
+ return StringType
+ }
+ return NewErr("type conversion error from '%s' to '%s'", StringType, typeVal)
+}
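+
+// Editor's note: conversion sketch (editorial addition, not upstream cel-go):
+//
+//	String("42").ConvertToType(IntType)  // Int(42)
+//	String("abc").ConvertToType(IntType) // conversion error value
+//	String("2001-02-03T04:05:06Z").ConvertToType(TimestampType) // Timestamp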
+
+// Equal implements ref.Val.Equal.
+func (s String) Equal(other ref.Val) ref.Val {
+ otherString, ok := other.(String)
+ return Bool(ok && s == otherString)
+}
+
+// IsZeroValue returns true if the string is empty.
+func (s String) IsZeroValue() bool {
+ return len(s) == 0
+}
+
+// Match implements traits.Matcher.Match.
+func (s String) Match(pattern ref.Val) ref.Val {
+ pat, ok := pattern.(String)
+ if !ok {
+ return MaybeNoSuchOverloadErr(pattern)
+ }
+ matched, err := regexp.MatchString(pat.Value().(string), s.Value().(string))
+ if err != nil {
+ return &Err{error: err}
+ }
+ return Bool(matched)
+}
+
+// Receive implements traits.Receiver.Receive.
+func (s String) Receive(function string, overload string, args []ref.Val) ref.Val {
+ switch len(args) {
+ case 1:
+ if f, found := stringOneArgOverloads[function]; found {
+ return f(s, args[0])
+ }
+ }
+ return NoSuchOverloadErr()
+}
+
+// Size implements traits.Sizer.Size.
+func (s String) Size() ref.Val {
+ return Int(len([]rune(s.Value().(string))))
+}
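+
+// Editor's note (editorial addition, not upstream cel-go): Size counts Unicode
+// code points rather than bytes:
+//
+//	String("héllo").Size() // Int(5), although len("héllo") is 6 bytes in UTF-8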
+
+// Type implements ref.Val.Type.
+func (s String) Type() ref.Type {
+ return StringType
+}
+
+// Value implements ref.Val.Value.
+func (s String) Value() any {
+ return string(s)
+}
+
+// StringContains returns whether the string contains a substring.
+func StringContains(s, sub ref.Val) ref.Val {
+ str, ok := s.(String)
+ if !ok {
+ return MaybeNoSuchOverloadErr(s)
+ }
+ subStr, ok := sub.(String)
+ if !ok {
+ return MaybeNoSuchOverloadErr(sub)
+ }
+ return Bool(strings.Contains(string(str), string(subStr)))
+}
+
+// StringEndsWith returns whether the target string contains the input suffix.
+func StringEndsWith(s, suf ref.Val) ref.Val {
+ str, ok := s.(String)
+ if !ok {
+ return MaybeNoSuchOverloadErr(s)
+ }
+ sufStr, ok := suf.(String)
+ if !ok {
+ return MaybeNoSuchOverloadErr(suf)
+ }
+ return Bool(strings.HasSuffix(string(str), string(sufStr)))
+}
+
+// StringStartsWith returns whether the target string contains the input prefix.
+func StringStartsWith(s, pre ref.Val) ref.Val {
+ str, ok := s.(String)
+ if !ok {
+ return MaybeNoSuchOverloadErr(s)
+ }
+ preStr, ok := pre.(String)
+ if !ok {
+ return MaybeNoSuchOverloadErr(pre)
+ }
+ return Bool(strings.HasPrefix(string(str), string(preStr)))
+}
diff --git a/vendor/github.com/google/cel-go/common/types/timestamp.go b/vendor/github.com/google/cel-go/common/types/timestamp.go
new file mode 100644
index 000000000..33acdea8e
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/timestamp.go
@@ -0,0 +1,311 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/google/cel-go/common/overloads"
+ "github.com/google/cel-go/common/types/ref"
+
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ tpb "google.golang.org/protobuf/types/known/timestamppb"
+)
+
+// Timestamp type implementation which supports add, compare, and subtract
+// operations. Timestamps are also capable of participating in dynamic
+// function dispatch to instance methods.
+type Timestamp struct {
+ time.Time
+}
+
+func timestampOf(t time.Time) Timestamp {
+ // Note that this function does not validate that time.Time is in our supported range.
+ return Timestamp{Time: t}
+}
+
+const (
+ // The number of seconds between year 1 and year 1970. This is borrowed from
+ // https://golang.org/src/time/time.go.
+ unixToInternal int64 = (1969*365 + 1969/4 - 1969/100 + 1969/400) * (60 * 60 * 24)
+
+ // Number of seconds between `0001-01-01T00:00:00Z` and the Unix epoch.
+ minUnixTime int64 = -62135596800
+ // Number of seconds between `9999-12-31T23:59:59.999999999Z` and the Unix epoch.
+ maxUnixTime int64 = 253402300799
+)
+
+// Add implements traits.Adder.Add.
+func (t Timestamp) Add(other ref.Val) ref.Val {
+ switch other.Type() {
+ case DurationType:
+ return other.(Duration).Add(t)
+ }
+ return MaybeNoSuchOverloadErr(other)
+}
+
+// Compare implements traits.Comparer.Compare.
+func (t Timestamp) Compare(other ref.Val) ref.Val {
+ if TimestampType != other.Type() {
+ return MaybeNoSuchOverloadErr(other)
+ }
+ ts1 := t.Time
+ ts2 := other.(Timestamp).Time
+ switch {
+ case ts1.Before(ts2):
+ return IntNegOne
+ case ts1.After(ts2):
+ return IntOne
+ default:
+ return IntZero
+ }
+}
+
+// ConvertToNative implements ref.Val.ConvertToNative.
+func (t Timestamp) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ // If the timestamp is already assignable to the desired type return it.
+ if reflect.TypeOf(t.Time).AssignableTo(typeDesc) {
+ return t.Time, nil
+ }
+ if reflect.TypeOf(t).AssignableTo(typeDesc) {
+ return t, nil
+ }
+ switch typeDesc {
+ case anyValueType:
+ // Pack the underlying time as a tpb.Timestamp into an Any value.
+ return anypb.New(tpb.New(t.Time))
+ case jsonValueType:
+ // CEL follows the proto3 to JSON conversion which formats as an RFC 3339 encoded JSON
+ // string.
+ v := t.ConvertToType(StringType)
+ if IsError(v) {
+ return nil, v.(*Err)
+ }
+ return structpb.NewStringValue(string(v.(String))), nil
+ case timestampValueType:
+ // Unwrap the underlying tpb.Timestamp.
+ return tpb.New(t.Time), nil
+ }
+ return nil, fmt.Errorf("type conversion error from 'Timestamp' to '%v'", typeDesc)
+}
+
+// ConvertToType implements ref.Val.ConvertToType.
+func (t Timestamp) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case StringType:
+ return String(t.Format(time.RFC3339Nano))
+ case IntType:
+ // Return the Unix time in seconds since 1970
+ return Int(t.Unix())
+ case TimestampType:
+ return t
+ case TypeType:
+ return TimestampType
+ }
+ return NewErr("type conversion error from '%s' to '%s'", TimestampType, typeVal)
+}
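+
+// Editor's note: conversion sketch (editorial addition, not upstream cel-go):
+//
+//	ts := timestampOf(time.Unix(1700000000, 0).UTC())
+//	ts.ConvertToType(IntType)    // Int(1700000000): seconds since the Unix epoch
+//	ts.ConvertToType(StringType) // String("2023-11-14T22:13:20Z"): RFC 3339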
+
+// Equal implements ref.Val.Equal.
+func (t Timestamp) Equal(other ref.Val) ref.Val {
+ otherTime, ok := other.(Timestamp)
+ return Bool(ok && t.Time.Equal(otherTime.Time))
+}
+
+// IsZeroValue returns true if the timestamp is epoch 0.
+func (t Timestamp) IsZeroValue() bool {
+ return t.IsZero()
+}
+
+// Receive implements traits.Receiver.Receive.
+func (t Timestamp) Receive(function string, overload string, args []ref.Val) ref.Val {
+ switch len(args) {
+ case 0:
+ if f, found := timestampZeroArgOverloads[function]; found {
+ return f(t.Time)
+ }
+ case 1:
+ if f, found := timestampOneArgOverloads[function]; found {
+ return f(t.Time, args[0])
+ }
+ }
+ return NoSuchOverloadErr()
+}
+
+// Subtract implements traits.Subtractor.Subtract.
+func (t Timestamp) Subtract(subtrahend ref.Val) ref.Val {
+ switch subtrahend.Type() {
+ case DurationType:
+ dur := subtrahend.(Duration)
+ val, err := subtractTimeDurationChecked(t.Time, dur.Duration)
+ if err != nil {
+ return WrapErr(err)
+ }
+ return timestampOf(val)
+ case TimestampType:
+ t2 := subtrahend.(Timestamp).Time
+ val, err := subtractTimeChecked(t.Time, t2)
+ if err != nil {
+ return WrapErr(err)
+ }
+ return durationOf(val)
+ }
+ return MaybeNoSuchOverloadErr(subtrahend)
+}
+
+// Type implements ref.Val.Type.
+func (t Timestamp) Type() ref.Type {
+ return TimestampType
+}
+
+// Value implements ref.Val.Value.
+func (t Timestamp) Value() any {
+ return t.Time
+}
+
+var (
+ timestampValueType = reflect.TypeOf(&tpb.Timestamp{})
+
+ timestampZeroArgOverloads = map[string]func(time.Time) ref.Val{
+ overloads.TimeGetFullYear: timestampGetFullYear,
+ overloads.TimeGetMonth: timestampGetMonth,
+ overloads.TimeGetDayOfYear: timestampGetDayOfYear,
+ overloads.TimeGetDate: timestampGetDayOfMonthOneBased,
+ overloads.TimeGetDayOfMonth: timestampGetDayOfMonthZeroBased,
+ overloads.TimeGetDayOfWeek: timestampGetDayOfWeek,
+ overloads.TimeGetHours: timestampGetHours,
+ overloads.TimeGetMinutes: timestampGetMinutes,
+ overloads.TimeGetSeconds: timestampGetSeconds,
+ overloads.TimeGetMilliseconds: timestampGetMilliseconds}
+
+ timestampOneArgOverloads = map[string]func(time.Time, ref.Val) ref.Val{
+ overloads.TimeGetFullYear: timestampGetFullYearWithTz,
+ overloads.TimeGetMonth: timestampGetMonthWithTz,
+ overloads.TimeGetDayOfYear: timestampGetDayOfYearWithTz,
+ overloads.TimeGetDate: timestampGetDayOfMonthOneBasedWithTz,
+ overloads.TimeGetDayOfMonth: timestampGetDayOfMonthZeroBasedWithTz,
+ overloads.TimeGetDayOfWeek: timestampGetDayOfWeekWithTz,
+ overloads.TimeGetHours: timestampGetHoursWithTz,
+ overloads.TimeGetMinutes: timestampGetMinutesWithTz,
+ overloads.TimeGetSeconds: timestampGetSecondsWithTz,
+ overloads.TimeGetMilliseconds: timestampGetMillisecondsWithTz}
+)
+
+type timestampVisitor func(time.Time) ref.Val
+
+func timestampGetFullYear(t time.Time) ref.Val {
+ return Int(t.Year())
+}
+func timestampGetMonth(t time.Time) ref.Val {
+ // CEL spec indicates that the month should be 0-based, but the Time value
+ // for Month() is 1-based.
+ return Int(t.Month() - 1)
+}
+func timestampGetDayOfYear(t time.Time) ref.Val {
+ return Int(t.YearDay() - 1)
+}
+func timestampGetDayOfMonthZeroBased(t time.Time) ref.Val {
+ return Int(t.Day() - 1)
+}
+func timestampGetDayOfMonthOneBased(t time.Time) ref.Val {
+ return Int(t.Day())
+}
+func timestampGetDayOfWeek(t time.Time) ref.Val {
+ return Int(t.Weekday())
+}
+func timestampGetHours(t time.Time) ref.Val {
+ return Int(t.Hour())
+}
+func timestampGetMinutes(t time.Time) ref.Val {
+ return Int(t.Minute())
+}
+func timestampGetSeconds(t time.Time) ref.Val {
+ return Int(t.Second())
+}
+func timestampGetMilliseconds(t time.Time) ref.Val {
+ return Int(t.Nanosecond() / 1000000)
+}
+
+func timestampGetFullYearWithTz(t time.Time, tz ref.Val) ref.Val {
+ return timeZone(tz, timestampGetFullYear)(t)
+}
+func timestampGetMonthWithTz(t time.Time, tz ref.Val) ref.Val {
+ return timeZone(tz, timestampGetMonth)(t)
+}
+func timestampGetDayOfYearWithTz(t time.Time, tz ref.Val) ref.Val {
+ return timeZone(tz, timestampGetDayOfYear)(t)
+}
+func timestampGetDayOfMonthZeroBasedWithTz(t time.Time, tz ref.Val) ref.Val {
+ return timeZone(tz, timestampGetDayOfMonthZeroBased)(t)
+}
+func timestampGetDayOfMonthOneBasedWithTz(t time.Time, tz ref.Val) ref.Val {
+ return timeZone(tz, timestampGetDayOfMonthOneBased)(t)
+}
+func timestampGetDayOfWeekWithTz(t time.Time, tz ref.Val) ref.Val {
+ return timeZone(tz, timestampGetDayOfWeek)(t)
+}
+func timestampGetHoursWithTz(t time.Time, tz ref.Val) ref.Val {
+ return timeZone(tz, timestampGetHours)(t)
+}
+func timestampGetMinutesWithTz(t time.Time, tz ref.Val) ref.Val {
+ return timeZone(tz, timestampGetMinutes)(t)
+}
+func timestampGetSecondsWithTz(t time.Time, tz ref.Val) ref.Val {
+ return timeZone(tz, timestampGetSeconds)(t)
+}
+func timestampGetMillisecondsWithTz(t time.Time, tz ref.Val) ref.Val {
+ return timeZone(tz, timestampGetMilliseconds)(t)
+}
+
+func timeZone(tz ref.Val, visitor timestampVisitor) timestampVisitor {
+ return func(t time.Time) ref.Val {
+ if StringType != tz.Type() {
+ return MaybeNoSuchOverloadErr(tz)
+ }
+ val := string(tz.(String))
+ ind := strings.Index(val, ":")
+ if ind == -1 {
+ loc, err := time.LoadLocation(val)
+ if err != nil {
+ return WrapErr(err)
+ }
+ return visitor(t.In(loc))
+ }
+
+ // If the input is not the name of a timezone (for example, 'US/Central'), it should be a numerical offset from UTC
+ // in the format ^(+|-)(0[0-9]|1[0-4]):[0-5][0-9]$. The numerical input is parsed in terms of hours and minutes.
+ hr, err := strconv.Atoi(string(val[0:ind]))
+ if err != nil {
+ return WrapErr(err)
+ }
+ min, err := strconv.Atoi(string(val[ind+1:]))
+ if err != nil {
+ return WrapErr(err)
+ }
+ var offset int
+ if string(val[0]) == "-" {
+ offset = hr*60 - min
+ } else {
+ offset = hr*60 + min
+ }
+ secondsEastOfUTC := int((time.Duration(offset) * time.Minute).Seconds())
+ timezone := time.FixedZone("", secondsEastOfUTC)
+ return visitor(t.In(timezone))
+ }
+}
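+
+// Editor's note: offset parsing sketch for timeZone above (editorial addition,
+// not upstream cel-go):
+//
+//	"US/Central" -> resolved through time.LoadLocation
+//	"+05:30"     -> fixed zone 330 minutes east of UTC
+//	"-07:00"     -> fixed zone -420 minutes (hr parses as -7, so hr*60 - min = -420)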
diff --git a/vendor/github.com/google/cel-go/common/types/traits/BUILD.bazel b/vendor/github.com/google/cel-go/common/types/traits/BUILD.bazel
new file mode 100644
index 000000000..b19eb8301
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/traits/BUILD.bazel
@@ -0,0 +1,29 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+package(
+ default_visibility = ["//visibility:public"],
+ licenses = ["notice"], # Apache 2.0
+)
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "comparer.go",
+ "container.go",
+ "field_tester.go",
+ "indexer.go",
+ "iterator.go",
+ "lister.go",
+ "mapper.go",
+ "matcher.go",
+ "math.go",
+ "receiver.go",
+ "sizer.go",
+ "traits.go",
+ "zeroer.go",
+ ],
+ importpath = "github.com/google/cel-go/common/types/traits",
+ deps = [
+ "//common/types/ref:go_default_library",
+ ],
+)
diff --git a/vendor/github.com/google/cel-go/common/types/traits/comparer.go b/vendor/github.com/google/cel-go/common/types/traits/comparer.go
new file mode 100644
index 000000000..b531d9ae2
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/traits/comparer.go
@@ -0,0 +1,33 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package traits
+
+import (
+ "github.com/google/cel-go/common/types/ref"
+)
+
+// Comparer interface for ordering comparisons between values in order to
+// support '<', '<=', '>=', '>' overloads.
+type Comparer interface {
+ // Compare this value to the input other value, returning an Int:
+ //
+ // this < other -> Int(-1)
+ // this == other -> Int(0)
+ // this > other -> Int(1)
+ //
+ // If the comparison cannot be made or is not supported, an error should
+ // be returned.
+ Compare(other ref.Val) ref.Val
+}
diff --git a/vendor/github.com/google/cel-go/common/types/traits/container.go b/vendor/github.com/google/cel-go/common/types/traits/container.go
new file mode 100644
index 000000000..cf5c621ae
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/traits/container.go
@@ -0,0 +1,23 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package traits
+
+import "github.com/google/cel-go/common/types/ref"
+
+// Container interface which permits containment tests such as 'a in b'.
+type Container interface {
+ // Contains returns true if the value exists within the object.
+ Contains(value ref.Val) ref.Val
+}
diff --git a/vendor/github.com/google/cel-go/common/types/traits/field_tester.go b/vendor/github.com/google/cel-go/common/types/traits/field_tester.go
new file mode 100644
index 000000000..816a95652
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/traits/field_tester.go
@@ -0,0 +1,30 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package traits
+
+import (
+ "github.com/google/cel-go/common/types/ref"
+)
+
+// FieldTester indicates if a defined field on an object type is set to a
+// non-default value.
+//
+// For use with the `has()` macro.
+type FieldTester interface {
+ // IsSet returns true if the field is defined and set to a non-default
+ // value. The method will return false if defined and not set, and an error
+ // if the field is not defined.
+ IsSet(field ref.Val) ref.Val
+}
diff --git a/vendor/github.com/google/cel-go/common/types/traits/indexer.go b/vendor/github.com/google/cel-go/common/types/traits/indexer.go
new file mode 100644
index 000000000..662c6836c
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/traits/indexer.go
@@ -0,0 +1,25 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package traits
+
+import (
+ "github.com/google/cel-go/common/types/ref"
+)
+
+// Indexer permits random access of elements by index 'a[b()]'.
+type Indexer interface {
+ // Get the value at the specified index or error.
+ Get(index ref.Val) ref.Val
+}
diff --git a/vendor/github.com/google/cel-go/common/types/traits/iterator.go b/vendor/github.com/google/cel-go/common/types/traits/iterator.go
new file mode 100644
index 000000000..91c10f08f
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/traits/iterator.go
@@ -0,0 +1,49 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package traits
+
+import (
+ "github.com/google/cel-go/common/types/ref"
+)
+
+// Iterable aggregate types permit traversal over their elements.
+type Iterable interface {
+ // Iterator returns a new iterator view of the struct.
+ Iterator() Iterator
+}
+
+// Iterator permits safe traversal over the contents of an aggregate type.
+type Iterator interface {
+ ref.Val
+
+ // HasNext returns true if there are unvisited elements in the Iterator.
+ HasNext() ref.Val
+
+ // Next returns the next element.
+ Next() ref.Val
+}
+
+// Foldable aggregate types support iteration over (key, value) or (index, value) pairs.
+type Foldable interface {
+ // Fold invokes the Folder.FoldEntry for all entries in the type.
+ Fold(Folder)
+}
+
+// Folder performs a fold on a given entry and indicates whether to continue folding.
+type Folder interface {
+ // FoldEntry indicates the key, value pair associated with the entry.
+ // If the output is true, continue folding. Otherwise, terminate the fold.
+ FoldEntry(key, val any) bool
+}
diff --git a/vendor/github.com/google/cel-go/common/types/traits/lister.go b/vendor/github.com/google/cel-go/common/types/traits/lister.go
new file mode 100644
index 000000000..e54781a60
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/traits/lister.go
@@ -0,0 +1,36 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package traits
+
+import "github.com/google/cel-go/common/types/ref"
+
+// Lister interface which aggregates the traits of a list.
+type Lister interface {
+ ref.Val
+ Adder
+ Container
+ Indexer
+ Iterable
+ Sizer
+}
+
+// MutableLister interface which emits an immutable result after an intermediate computation.
+//
+// Note, this interface is intended only to be used within Comprehensions where the mutable
+// value is not directly observable within the user-authored CEL expression.
+type MutableLister interface {
+ Lister
+ ToImmutableList() Lister
+}
diff --git a/vendor/github.com/google/cel-go/common/types/traits/mapper.go b/vendor/github.com/google/cel-go/common/types/traits/mapper.go
new file mode 100644
index 000000000..d13333f3f
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/traits/mapper.go
@@ -0,0 +1,48 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package traits
+
+import "github.com/google/cel-go/common/types/ref"
+
+// Mapper interface which aggregates the traits of a map.
+type Mapper interface {
+ ref.Val
+ Container
+ Indexer
+ Iterable
+ Sizer
+
+ // Find returns a value, if one exists, for the input key.
+ //
+ // If the key is not found the function returns (nil, false).
+ // If the input key is not valid for the map, or is Err or Unknown the function returns
+ // (Unknown|Err, false).
+ Find(key ref.Val) (ref.Val, bool)
+}
+
+// MutableMapper interface which emits an immutable result after an intermediate computation.
+//
+// Note, this interface is intended only to be used within Comprehensions where the mutable
+// value is not directly observable within the user-authored CEL expression.
+type MutableMapper interface {
+ Mapper
+
+ // Insert a key, value pair into the map, returning the map if the insert is successful
+ // and an error if key already exists in the mutable map.
+ Insert(k, v ref.Val) ref.Val
+
+ // ToImmutableMap converts a mutable map into an immutable map.
+ ToImmutableMap() Mapper
+}
diff --git a/vendor/github.com/google/cel-go/common/types/traits/matcher.go b/vendor/github.com/google/cel-go/common/types/traits/matcher.go
new file mode 100644
index 000000000..085dc94ff
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/traits/matcher.go
@@ -0,0 +1,23 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package traits
+
+import "github.com/google/cel-go/common/types/ref"
+
+// Matcher interface for supporting 'matches()' overloads.
+type Matcher interface {
+ // Match returns true if the pattern matches the current value.
+ Match(pattern ref.Val) ref.Val
+}
diff --git a/vendor/github.com/google/cel-go/common/types/traits/math.go b/vendor/github.com/google/cel-go/common/types/traits/math.go
new file mode 100644
index 000000000..86d5b9137
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/traits/math.go
@@ -0,0 +1,62 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package traits
+
+import "github.com/google/cel-go/common/types/ref"
+
+// Adder interface to support '+' operator overloads.
+type Adder interface {
+ // Add returns a combination of the current value and other value.
+ //
+ // If the other value is an unsupported type, an error is returned.
+ Add(other ref.Val) ref.Val
+}
+
+// Divider interface to support '/' operator overloads.
+type Divider interface {
+ // Divide returns the result of dividing the current value by the input
+ // denominator.
+ //
+ // A denominator value of zero results in an error.
+ Divide(denominator ref.Val) ref.Val
+}
+
+// Modder interface to support '%' operator overloads.
+type Modder interface {
+ // Modulo returns the result of taking the modulus of the current value
+ // by the denominator.
+ //
+ // A denominator value of zero results in an error.
+ Modulo(denominator ref.Val) ref.Val
+}
+
+// Multiplier interface to support '*' operator overloads.
+type Multiplier interface {
+ // Multiply returns the result of multiplying the current and input value.
+ Multiply(other ref.Val) ref.Val
+}
+
+// Negater interface to support unary '-' and '!' operator overloads.
+type Negater interface {
+ // Negate returns the complement of the current value.
+ Negate() ref.Val
+}
+
+// Subtractor interface to support binary '-' operator overloads.
+type Subtractor interface {
+ // Subtract returns the result of subtracting the input from the current
+ // value.
+ Subtract(subtrahend ref.Val) ref.Val
+}
diff --git a/vendor/github.com/google/cel-go/common/types/traits/receiver.go b/vendor/github.com/google/cel-go/common/types/traits/receiver.go
new file mode 100644
index 000000000..8f41db45e
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/traits/receiver.go
@@ -0,0 +1,24 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package traits
+
+import "github.com/google/cel-go/common/types/ref"
+
+// Receiver interface for routing instance method calls within a value.
+type Receiver interface {
+ // Receive accepts a function name, overload id, and arguments and returns
+ // a value.
+ Receive(function string, overload string, args []ref.Val) ref.Val
+}
diff --git a/vendor/github.com/google/cel-go/common/types/traits/sizer.go b/vendor/github.com/google/cel-go/common/types/traits/sizer.go
new file mode 100644
index 000000000..b80d25137
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/traits/sizer.go
@@ -0,0 +1,25 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package traits
+
+import (
+ "github.com/google/cel-go/common/types/ref"
+)
+
+// Sizer interface for supporting 'size()' overloads.
+type Sizer interface {
+ // Size returns the number of elements or length of the value.
+ Size() ref.Val
+}
diff --git a/vendor/github.com/google/cel-go/common/types/traits/traits.go b/vendor/github.com/google/cel-go/common/types/traits/traits.go
new file mode 100644
index 000000000..51a09df56
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/traits/traits.go
@@ -0,0 +1,79 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package traits defines interfaces that a type may implement to participate
+// in operator overloads and function dispatch.
+package traits
+
+const (
+ // AdderType types provide a '+' operator overload.
+ AdderType = 1 << iota
+
+ // ComparerType types support ordering comparisons '<', '<=', '>', '>='.
+ ComparerType
+
+ // ContainerType types support 'in' operations.
+ ContainerType
+
+ // DividerType types support '/' operations.
+ DividerType
+
+ // FieldTesterType types support the detection of field value presence.
+ FieldTesterType
+
+ // IndexerType types support index access with dynamic values.
+ IndexerType
+
+ // IterableType types can be iterated over in comprehensions.
+ IterableType
+
+ // IteratorType types support iterator semantics.
+ IteratorType
+
+ // MatcherType types support pattern matching via 'matches' method.
+ MatcherType
+
+ // ModderType types support modulus operations '%'.
+ ModderType
+
+ // MultiplierType types support '*' operations.
+ MultiplierType
+
+ // NegatorType types support negation via either '!' or '-'.
+ NegatorType
+
+ // ReceiverType types support dynamic dispatch to instance methods.
+ ReceiverType
+
+ // SizerType types support the size() method.
+ SizerType
+
+ // SubtractorType types support '-' operations.
+ SubtractorType
+
+ // FoldableType types support comprehensions v2 macros which iterate over (key, value) pairs.
+ FoldableType
+)
+
+const (
+ // ListerType supports a set of traits necessary for list operations.
+ //
+ // The ListerType is syntactic sugar and not intended to be a perfect reflection of all List operators.
+ ListerType = AdderType | ContainerType | IndexerType | IterableType | SizerType
+
+ // MapperType supports a set of traits necessary for map operations.
+ //
+ // The MapperType is syntactic sugar and not intended to be a perfect reflection of all Map operators.
+ MapperType = ContainerType | IndexerType | IterableType | SizerType
+)
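+
+// Editor's note: composition sketch (editorial addition, not upstream cel-go).
+// Trait masks are plain bit flags, so membership checks are bitwise tests:
+//
+//	ListerType&SizerType != 0   // true: lists support size()
+//	ListerType&MatcherType != 0 // false: lists do not support matches()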
diff --git a/vendor/github.com/google/cel-go/common/types/traits/zeroer.go b/vendor/github.com/google/cel-go/common/types/traits/zeroer.go
new file mode 100644
index 000000000..0b7c830a2
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/traits/zeroer.go
@@ -0,0 +1,21 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package traits
+
+// Zeroer interface for testing whether a CEL value is a zero value for its type.
+type Zeroer interface {
+ // IsZeroValue indicates whether the object is the zero value for the type.
+ IsZeroValue() bool
+}
diff --git a/vendor/github.com/google/cel-go/common/types/types.go b/vendor/github.com/google/cel-go/common/types/types.go
new file mode 100644
index 000000000..1c5b6c40c
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/types.go
@@ -0,0 +1,864 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ "google.golang.org/protobuf/proto"
+
+ chkdecls "github.com/google/cel-go/checker/decls"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+
+ celpb "cel.dev/expr"
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+// Kind indicates a CEL type's kind which is used to differentiate quickly between simple
+// and complex types.
+type Kind uint
+
+const (
+ // UnspecifiedKind is returned when the type is nil or its kind is not specified.
+ UnspecifiedKind Kind = iota
+
+ // DynKind represents a dynamic type. This kind only exists at type-check time.
+ DynKind
+
+ // AnyKind represents a google.protobuf.Any type. This kind only exists at type-check time.
+ // Prefer DynKind to AnyKind as AnyKind has a specific meaning which is based on protobuf
+ // well-known types.
+ AnyKind
+
+ // BoolKind represents a boolean type.
+ BoolKind
+
+ // BytesKind represents a bytes type.
+ BytesKind
+
+ // DoubleKind represents a double type.
+ DoubleKind
+
+ // DurationKind represents a CEL duration type.
+ DurationKind
+
+ // ErrorKind represents a CEL error type.
+ ErrorKind
+
+ // IntKind represents an integer type.
+ IntKind
+
+ // ListKind represents a list type.
+ ListKind
+
+ // MapKind represents a map type.
+ MapKind
+
+ // NullTypeKind represents a null type.
+ NullTypeKind
+
+ // OpaqueKind represents an abstract type which has no accessible fields.
+ OpaqueKind
+
+ // StringKind represents a string type.
+ StringKind
+
+ // StructKind represents a structured object with typed fields.
+ StructKind
+
+ // TimestampKind represents a CEL time type.
+ TimestampKind
+
+ // TypeKind represents the CEL type.
+ TypeKind
+
+ // TypeParamKind represents a parameterized type whose type name will be resolved at type-check time, if possible.
+ TypeParamKind
+
+ // UintKind represents a uint type.
+ UintKind
+
+ // UnknownKind represents an unknown value type.
+ UnknownKind
+)
+
+var (
+ // AnyType represents the google.protobuf.Any type.
+ AnyType = &Type{
+ kind: AnyKind,
+ runtimeTypeName: "google.protobuf.Any",
+ traitMask: traits.FieldTesterType |
+ traits.IndexerType,
+ }
+ // BoolType represents the bool type.
+ BoolType = &Type{
+ kind: BoolKind,
+ runtimeTypeName: "bool",
+ traitMask: traits.ComparerType |
+ traits.NegatorType,
+ }
+ // BytesType represents the bytes type.
+ BytesType = &Type{
+ kind: BytesKind,
+ runtimeTypeName: "bytes",
+ traitMask: traits.AdderType |
+ traits.ComparerType |
+ traits.SizerType,
+ }
+ // DoubleType represents the double type.
+ DoubleType = &Type{
+ kind: DoubleKind,
+ runtimeTypeName: "double",
+ traitMask: traits.AdderType |
+ traits.ComparerType |
+ traits.DividerType |
+ traits.MultiplierType |
+ traits.NegatorType |
+ traits.SubtractorType,
+ }
+ // DurationType represents the CEL duration type.
+ DurationType = &Type{
+ kind: DurationKind,
+ runtimeTypeName: "google.protobuf.Duration",
+ traitMask: traits.AdderType |
+ traits.ComparerType |
+ traits.NegatorType |
+ traits.ReceiverType |
+ traits.SubtractorType,
+ }
+ // DynType represents a dynamic CEL type whose type will be determined at runtime from context.
+ DynType = &Type{
+ kind: DynKind,
+ runtimeTypeName: "dyn",
+ }
+ // ErrorType represents a CEL error value.
+ ErrorType = &Type{
+ kind: ErrorKind,
+ runtimeTypeName: "error",
+ }
+ // IntType represents the int type.
+ IntType = &Type{
+ kind: IntKind,
+ runtimeTypeName: "int",
+ traitMask: traits.AdderType |
+ traits.ComparerType |
+ traits.DividerType |
+ traits.ModderType |
+ traits.MultiplierType |
+ traits.NegatorType |
+ traits.SubtractorType,
+ }
+ // ListType represents the runtime list type.
+ ListType = NewListType(nil)
+ // MapType represents the runtime map type.
+ MapType = NewMapType(nil, nil)
+ // NullType represents the type of a null value.
+ NullType = &Type{
+ kind: NullTypeKind,
+ runtimeTypeName: "null_type",
+ }
+ // StringType represents the string type.
+ StringType = &Type{
+ kind: StringKind,
+ runtimeTypeName: "string",
+ traitMask: traits.AdderType |
+ traits.ComparerType |
+ traits.MatcherType |
+ traits.ReceiverType |
+ traits.SizerType,
+ }
+ // TimestampType represents the time type.
+ TimestampType = &Type{
+ kind: TimestampKind,
+ runtimeTypeName: "google.protobuf.Timestamp",
+ traitMask: traits.AdderType |
+ traits.ComparerType |
+ traits.ReceiverType |
+ traits.SubtractorType,
+ }
+ // TypeType represents a CEL type.
+ TypeType = &Type{
+ kind: TypeKind,
+ runtimeTypeName: "type",
+ }
+ // UintType represents a uint type.
+ UintType = &Type{
+ kind: UintKind,
+ runtimeTypeName: "uint",
+ traitMask: traits.AdderType |
+ traits.ComparerType |
+ traits.DividerType |
+ traits.ModderType |
+ traits.MultiplierType |
+ traits.SubtractorType,
+ }
+ // UnknownType represents an unknown value type.
+ UnknownType = &Type{
+ kind: UnknownKind,
+ runtimeTypeName: "unknown",
+ }
+)
+
+var _ ref.Type = &Type{}
+var _ ref.Val = &Type{}
+
+// Type holds a reference to a runtime type with an optional type-checked set of type parameters.
+type Type struct {
+ // kind indicates general category of the type.
+ kind Kind
+
+ // parameters holds the optional type-checked set of type Parameters that are used during static analysis.
+ parameters []*Type
+
+ // runtimeTypeName indicates the runtime type name of the type.
+ runtimeTypeName string
+
+ // isAssignableType function determines whether one type is assignable to this type.
+ // A nil value for the isAssignableType function falls back to equality of kind, runtimeType, and parameters.
+ isAssignableType func(other *Type) bool
+
+ // isAssignableRuntimeType function determines whether the runtime type (with erasure) is assignable to this type.
+ // A nil value for the isAssignableRuntimeType function falls back to the equality of the type or type name.
+ isAssignableRuntimeType func(other ref.Val) bool
+
+ // traitMask is a mask of flags which indicate the capabilities of the type.
+ traitMask int
+}
+
+// ConvertToNative implements ref.Val.ConvertToNative.
+func (t *Type) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ return nil, fmt.Errorf("type conversion not supported for 'type'")
+}
+
+// ConvertToType implements ref.Val.ConvertToType.
+func (t *Type) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case TypeType:
+ return TypeType
+ case StringType:
+ return String(t.TypeName())
+ }
+ return NewErr("type conversion error from '%s' to '%s'", TypeType, typeVal)
+}
+
+// Equal indicates whether two types have the same runtime type name.
+//
+// The name Equal is a bit of a misnomer, but for historical reasons, this is the
+// runtime behavior. For a more accurate definition see IsType().
+func (t *Type) Equal(other ref.Val) ref.Val {
+ otherType, ok := other.(ref.Type)
+ return Bool(ok && t.TypeName() == otherType.TypeName())
+}
+
+// HasTrait implements the ref.Type interface method.
+func (t *Type) HasTrait(trait int) bool {
+ return trait&t.traitMask == trait
+}
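+
+// Illustrative usage (a sketch; the results follow directly from the trait
+// masks declared above). HasTrait requires every bit of the supplied mask to
+// be present in the type's trait mask, so a combined mask requires all traits:
+//
+//	IntType.HasTrait(traits.AdderType)                       // true
+//	BoolType.HasTrait(traits.AdderType)                      // false
+//	IntType.HasTrait(traits.AdderType | traits.ComparerType) // true: all bits set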
+
+// IsExactType indicates whether the two types are exactly the same. This check also verifies type parameter type names.
+func (t *Type) IsExactType(other *Type) bool {
+ return t.isTypeInternal(other, true)
+}
+
+// IsEquivalentType indicates whether two types are equivalent. This check ignores type parameter type names.
+func (t *Type) IsEquivalentType(other *Type) bool {
+ return t.isTypeInternal(other, false)
+}
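+
+// Illustrative sketch of the exact-versus-equivalent distinction (the type
+// parameter names are chosen arbitrarily for demonstration):
+//
+//	p1 := NewListType(NewTypeParamType("T"))
+//	p2 := NewListType(NewTypeParamType("U"))
+//	p1.IsExactType(p2)      // false: type parameter names differ
+//	p1.IsEquivalentType(p2) // true: type parameter names are ignored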
+
+// Kind indicates general category of the type.
+func (t *Type) Kind() Kind {
+ if t == nil {
+ return UnspecifiedKind
+ }
+ return t.kind
+}
+
+// isTypeInternal checks whether the two types are equivalent or exactly the same based on the checkTypeParamName flag.
+func (t *Type) isTypeInternal(other *Type, checkTypeParamName bool) bool {
+ if t == nil {
+ return false
+ }
+ if t == other {
+ return true
+ }
+ if t.Kind() != other.Kind() || len(t.Parameters()) != len(other.Parameters()) {
+ return false
+ }
+ if (checkTypeParamName || t.Kind() != TypeParamKind) && t.TypeName() != other.TypeName() {
+ return false
+ }
+ for i, p := range t.Parameters() {
+ if !p.isTypeInternal(other.Parameters()[i], checkTypeParamName) {
+ return false
+ }
+ }
+ return true
+}
+
+// IsAssignableType determines whether the current type is type-check assignable from the input fromType.
+func (t *Type) IsAssignableType(fromType *Type) bool {
+ if t == nil {
+ return false
+ }
+ if t.isAssignableType != nil {
+ return t.isAssignableType(fromType)
+ }
+ return t.defaultIsAssignableType(fromType)
+}
+
+// IsAssignableRuntimeType determines whether the current type is runtime assignable from the input runtimeType.
+//
+// At runtime, parameterized types are erased and so a function which type-checks to support a map(string, string)
+// will have a runtime assignable type of a map.
+func (t *Type) IsAssignableRuntimeType(val ref.Val) bool {
+ if t == nil {
+ return false
+ }
+ if t.isAssignableRuntimeType != nil {
+ return t.isAssignableRuntimeType(val)
+ }
+ return t.defaultIsAssignableRuntimeType(val)
+}
+
+// Parameters returns the list of type parameters if set.
+//
+// For ListKind, Parameters()[0] represents the list element type
+// For MapKind, Parameters()[0] represents the map key type, and Parameters()[1] represents the map
+// value type.
+func (t *Type) Parameters() []*Type {
+ if t == nil {
+ return emptyParams
+ }
+ return t.parameters
+}
+
+// DeclaredTypeName indicates the fully qualified and parameterized type-check type name.
+func (t *Type) DeclaredTypeName() string {
+ // If the type itself is neither null nor dyn, but is assignable to null, then it is a wrapper type.
+ if t.Kind() != NullTypeKind && !t.isDyn() && t.IsAssignableType(NullType) {
+ return fmt.Sprintf("wrapper(%s)", t.TypeName())
+ }
+ return t.TypeName()
+}
+
+// Type implements the ref.Val interface method.
+func (t *Type) Type() ref.Type {
+ return TypeType
+}
+
+// Value implements the ref.Val interface method.
+func (t *Type) Value() any {
+ return t.TypeName()
+}
+
+// TypeName returns the type-erased fully qualified runtime type name.
+//
+// TypeName implements the ref.Type interface method.
+func (t *Type) TypeName() string {
+ if t == nil {
+ return ""
+ }
+ return t.runtimeTypeName
+}
+
+// WithTraits creates a copy of the current Type and sets the trait mask to the traits parameter.
+//
+// This method should be used with Opaque types where the type acts like a container, e.g. vector.
+func (t *Type) WithTraits(traits int) *Type {
+ if t == nil {
+ return nil
+ }
+ return &Type{
+ kind: t.kind,
+ parameters: t.parameters,
+ runtimeTypeName: t.runtimeTypeName,
+ isAssignableType: t.isAssignableType,
+ isAssignableRuntimeType: t.isAssignableRuntimeType,
+ traitMask: traits,
+ }
+}
+
+// String returns a human-readable definition of the type name.
+func (t *Type) String() string {
+ if len(t.Parameters()) == 0 {
+ return t.DeclaredTypeName()
+ }
+ params := make([]string, len(t.Parameters()))
+ for i, p := range t.Parameters() {
+ params[i] = p.String()
+ }
+ return fmt.Sprintf("%s(%s)", t.DeclaredTypeName(), strings.Join(params, ", "))
+}
+
+// isDyn indicates whether the type is dynamic in any way.
+func (t *Type) isDyn() bool {
+ k := t.Kind()
+ return k == DynKind || k == AnyKind || k == TypeParamKind
+}
+
+// defaultIsAssignableType provides the standard definition of what it means for one type to be assignable to another
+// where any of the following may return a true result:
+// - The two types are the same instance.
+// - The target type is dynamic.
+// - The fromType has the same kind and type name as the target type, and all
+//   parameters of the target type are IsAssignableType() from the corresponding
+//   parameters of the fromType.
+func (t *Type) defaultIsAssignableType(fromType *Type) bool {
+ if t == fromType || t.isDyn() {
+ return true
+ }
+ if t.Kind() != fromType.Kind() ||
+ t.TypeName() != fromType.TypeName() ||
+ len(t.Parameters()) != len(fromType.Parameters()) {
+ return false
+ }
+ for i, tp := range t.Parameters() {
+ fp := fromType.Parameters()[i]
+ if !tp.IsAssignableType(fp) {
+ return false
+ }
+ }
+ return true
+}
+
+// defaultIsAssignableRuntimeType inspects the type and, for lists and maps, the key and element
+// types to determine whether a ref.Val is assignable to the declared type for a function signature.
+func (t *Type) defaultIsAssignableRuntimeType(val ref.Val) bool {
+ valType := val.Type()
+ // If the current type and value type don't agree, then return
+ if !(t.isDyn() || t.TypeName() == valType.TypeName()) {
+ return false
+ }
+ switch t.Kind() {
+ case ListKind:
+ elemType := t.Parameters()[0]
+ l := val.(traits.Lister)
+ if l.Size() == IntZero {
+ return true
+ }
+ it := l.Iterator()
+ elemVal := it.Next()
+ return elemType.IsAssignableRuntimeType(elemVal)
+ case MapKind:
+ keyType := t.Parameters()[0]
+ elemType := t.Parameters()[1]
+ m := val.(traits.Mapper)
+ if m.Size() == IntZero {
+ return true
+ }
+ it := m.Iterator()
+ keyVal := it.Next()
+ elemVal := m.Get(keyVal)
+ return keyType.IsAssignableRuntimeType(keyVal) && elemType.IsAssignableRuntimeType(elemVal)
+ }
+ return true
+}
+
+// NewListType creates an instance of a list type value with the provided element type.
+func NewListType(elemType *Type) *Type {
+ return &Type{
+ kind: ListKind,
+ parameters: []*Type{elemType},
+ runtimeTypeName: "list",
+ traitMask: traits.AdderType |
+ traits.ContainerType |
+ traits.IndexerType |
+ traits.IterableType |
+ traits.SizerType,
+ }
+}
+
+// NewMapType creates an instance of a map type value with the provided key and value types.
+func NewMapType(keyType, valueType *Type) *Type {
+ return &Type{
+ kind: MapKind,
+ parameters: []*Type{keyType, valueType},
+ runtimeTypeName: "map",
+ traitMask: traits.ContainerType |
+ traits.IndexerType |
+ traits.IterableType |
+ traits.SizerType,
+ }
+}
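+
+// Illustrative sketch: parameterized list and map types render their parameters
+// in String() while keeping the erased runtime names "list" and "map":
+//
+//	NewListType(StringType).String()           // "list(string)"
+//	NewMapType(StringType, DynType).String()   // "map(string, dyn)"
+//	NewMapType(StringType, DynType).TypeName() // "map"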
+
+// NewNullableType creates an instance of a nullable type with the provided wrapped type.
+//
+// Note: only primitive types are supported as wrapped types.
+func NewNullableType(wrapped *Type) *Type {
+ return &Type{
+ kind: wrapped.Kind(),
+ parameters: wrapped.Parameters(),
+ runtimeTypeName: wrapped.TypeName(),
+ traitMask: wrapped.traitMask,
+ isAssignableType: func(other *Type) bool {
+ return NullType.IsAssignableType(other) || wrapped.IsAssignableType(other)
+ },
+ isAssignableRuntimeType: func(other ref.Val) bool {
+ return NullType.IsAssignableRuntimeType(other) || wrapped.IsAssignableRuntimeType(other)
+ },
+ }
+}
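+
+// Illustrative sketch: a nullable (wrapper) type accepts both its wrapped type
+// and null, which is also how DeclaredTypeName identifies it:
+//
+//	ni := NewNullableType(IntType)
+//	ni.IsAssignableType(IntType)  // true
+//	ni.IsAssignableType(NullType) // true
+//	ni.DeclaredTypeName()         // "wrapper(int)"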
+
+// NewOptionalType creates an abstract parameterized type instance corresponding to CEL's notion of optional.
+func NewOptionalType(param *Type) *Type {
+ return NewOpaqueType("optional_type", param)
+}
+
+// NewOpaqueType creates an abstract parameterized type with a given name.
+func NewOpaqueType(name string, params ...*Type) *Type {
+ return &Type{
+ kind: OpaqueKind,
+ parameters: params,
+ runtimeTypeName: name,
+ }
+}
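+
+// Illustrative sketch: optional is modeled as an opaque type named
+// "optional_type", and opaque types print like any other parameterized type:
+//
+//	NewOptionalType(IntType).String()                       // "optional_type(int)"
+//	NewOpaqueType("vector", NewTypeParamType("T")).String() // "vector(T)"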
+
+// NewObjectType creates a type reference to an externally defined type, e.g. a protobuf message type.
+//
+// An object type is assumed to support field presence testing and field indexing. Additionally, the
+// type may also indicate additional traits through the use of the optional traits vararg argument.
+func NewObjectType(typeName string, traits ...int) *Type {
+ // Function sanitizes object types on the fly
+ if wkt, found := checkedWellKnowns[typeName]; found {
+ return wkt
+ }
+ traitMask := 0
+ for _, trait := range traits {
+ traitMask |= trait
+ }
+ return &Type{
+ kind: StructKind,
+ parameters: emptyParams,
+ runtimeTypeName: typeName,
+ traitMask: structTypeTraitMask | traitMask,
+ }
+}
+
+// NewObjectTypeValue creates a type reference to an externally defined type.
+//
+// Deprecated: use cel.ObjectType(typeName)
+func NewObjectTypeValue(typeName string) *Type {
+ return NewObjectType(typeName)
+}
+
+// NewTypeValue creates an opaque type which has a set of optional type traits as defined in
+// the common/types/traits package.
+//
+// Deprecated: use cel.ObjectType(typeName, traits)
+func NewTypeValue(typeName string, traits ...int) *Type {
+ traitMask := 0
+ for _, trait := range traits {
+ traitMask |= trait
+ }
+ return &Type{
+ kind: StructKind,
+ parameters: emptyParams,
+ runtimeTypeName: typeName,
+ traitMask: traitMask,
+ }
+}
+
+// NewTypeParamType creates a parameterized type instance.
+func NewTypeParamType(paramName string) *Type {
+ return &Type{
+ kind: TypeParamKind,
+ runtimeTypeName: paramName,
+ }
+}
+
+// NewTypeTypeWithParam creates a type with a type parameter.
+// Used for type-checking purposes, but equivalent to TypeType otherwise.
+func NewTypeTypeWithParam(param *Type) *Type {
+ return &Type{
+ kind: TypeKind,
+ runtimeTypeName: "type",
+ parameters: []*Type{param},
+ }
+}
+
+// TypeToExprType converts a CEL-native type representation to a protobuf CEL Type representation.
+func TypeToExprType(t *Type) (*exprpb.Type, error) {
+ switch t.Kind() {
+ case AnyKind:
+ return chkdecls.Any, nil
+ case BoolKind:
+ return maybeWrapper(t, chkdecls.Bool), nil
+ case BytesKind:
+ return maybeWrapper(t, chkdecls.Bytes), nil
+ case DoubleKind:
+ return maybeWrapper(t, chkdecls.Double), nil
+ case DurationKind:
+ return chkdecls.Duration, nil
+ case DynKind:
+ return chkdecls.Dyn, nil
+ case ErrorKind:
+ return chkdecls.Error, nil
+ case IntKind:
+ return maybeWrapper(t, chkdecls.Int), nil
+ case ListKind:
+ if len(t.Parameters()) != 1 {
+ return nil, fmt.Errorf("invalid list, got %d parameters, wanted one", len(t.Parameters()))
+ }
+ et, err := TypeToExprType(t.Parameters()[0])
+ if err != nil {
+ return nil, err
+ }
+ return chkdecls.NewListType(et), nil
+ case MapKind:
+ if len(t.Parameters()) != 2 {
+ return nil, fmt.Errorf("invalid map, got %d parameters, wanted two", len(t.Parameters()))
+ }
+ kt, err := TypeToExprType(t.Parameters()[0])
+ if err != nil {
+ return nil, err
+ }
+ vt, err := TypeToExprType(t.Parameters()[1])
+ if err != nil {
+ return nil, err
+ }
+ return chkdecls.NewMapType(kt, vt), nil
+ case NullTypeKind:
+ return chkdecls.Null, nil
+ case OpaqueKind:
+ params := make([]*exprpb.Type, len(t.Parameters()))
+ for i, p := range t.Parameters() {
+ pt, err := TypeToExprType(p)
+ if err != nil {
+ return nil, err
+ }
+ params[i] = pt
+ }
+ return chkdecls.NewAbstractType(t.TypeName(), params...), nil
+ case StringKind:
+ return maybeWrapper(t, chkdecls.String), nil
+ case StructKind:
+ return chkdecls.NewObjectType(t.TypeName()), nil
+ case TimestampKind:
+ return chkdecls.Timestamp, nil
+ case TypeParamKind:
+ return chkdecls.NewTypeParamType(t.TypeName()), nil
+ case TypeKind:
+ if len(t.Parameters()) == 1 {
+ p, err := TypeToExprType(t.Parameters()[0])
+ if err != nil {
+ return nil, err
+ }
+ return chkdecls.NewTypeType(p), nil
+ }
+ return chkdecls.NewTypeType(nil), nil
+ case UintKind:
+ return maybeWrapper(t, chkdecls.Uint), nil
+ }
+ return nil, fmt.Errorf("missing type conversion to proto: %v", t)
+}
+
+// ExprTypeToType converts a protobuf CEL type representation to a CEL-native type representation.
+func ExprTypeToType(t *exprpb.Type) (*Type, error) {
+ return AlphaProtoAsType(t)
+}
+
+// AlphaProtoAsType converts a CEL v1alpha1.Type protobuf type to a CEL-native type representation.
+func AlphaProtoAsType(t *exprpb.Type) (*Type, error) {
+ canonical := &celpb.Type{}
+ if err := convertProto(t, canonical); err != nil {
+ return nil, err
+ }
+ return ProtoAsType(canonical)
+}
+
+// ProtoAsType converts a canonical CEL celpb.Type protobuf type to a CEL-native type representation.
+func ProtoAsType(t *celpb.Type) (*Type, error) {
+ switch t.GetTypeKind().(type) {
+ case *celpb.Type_Dyn:
+ return DynType, nil
+ case *celpb.Type_AbstractType_:
+ paramTypes := make([]*Type, len(t.GetAbstractType().GetParameterTypes()))
+ for i, p := range t.GetAbstractType().GetParameterTypes() {
+ pt, err := ProtoAsType(p)
+ if err != nil {
+ return nil, err
+ }
+ paramTypes[i] = pt
+ }
+ return NewOpaqueType(t.GetAbstractType().GetName(), paramTypes...), nil
+ case *celpb.Type_ListType_:
+ et, err := ProtoAsType(t.GetListType().GetElemType())
+ if err != nil {
+ return nil, err
+ }
+ return NewListType(et), nil
+ case *celpb.Type_MapType_:
+ kt, err := ProtoAsType(t.GetMapType().GetKeyType())
+ if err != nil {
+ return nil, err
+ }
+ vt, err := ProtoAsType(t.GetMapType().GetValueType())
+ if err != nil {
+ return nil, err
+ }
+ return NewMapType(kt, vt), nil
+ case *celpb.Type_MessageType:
+ return NewObjectType(t.GetMessageType()), nil
+ case *celpb.Type_Null:
+ return NullType, nil
+ case *celpb.Type_Primitive:
+ switch t.GetPrimitive() {
+ case celpb.Type_BOOL:
+ return BoolType, nil
+ case celpb.Type_BYTES:
+ return BytesType, nil
+ case celpb.Type_DOUBLE:
+ return DoubleType, nil
+ case celpb.Type_INT64:
+ return IntType, nil
+ case celpb.Type_STRING:
+ return StringType, nil
+ case celpb.Type_UINT64:
+ return UintType, nil
+ default:
+ return nil, fmt.Errorf("unsupported primitive type: %v", t)
+ }
+ case *celpb.Type_TypeParam:
+ return NewTypeParamType(t.GetTypeParam()), nil
+ case *celpb.Type_Type:
+ if t.GetType().GetTypeKind() != nil {
+ p, err := ProtoAsType(t.GetType())
+ if err != nil {
+ return nil, err
+ }
+ return NewTypeTypeWithParam(p), nil
+ }
+ return TypeType, nil
+ case *celpb.Type_WellKnown:
+ switch t.GetWellKnown() {
+ case celpb.Type_ANY:
+ return AnyType, nil
+ case celpb.Type_DURATION:
+ return DurationType, nil
+ case celpb.Type_TIMESTAMP:
+ return TimestampType, nil
+ default:
+ return nil, fmt.Errorf("unsupported well-known type: %v", t)
+ }
+ case *celpb.Type_Wrapper:
+ t, err := ProtoAsType(&celpb.Type{TypeKind: &celpb.Type_Primitive{Primitive: t.GetWrapper()}})
+ if err != nil {
+ return nil, err
+ }
+ return NewNullableType(t), nil
+ case *celpb.Type_Error:
+ return ErrorType, nil
+ default:
+ return nil, fmt.Errorf("unsupported type: %v", t)
+ }
+}
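+
+// Illustrative round trip between the CEL-native and protobuf type
+// representations (a sketch; error handling elided):
+//
+//	pb, _ := TypeToExprType(NewListType(StringType)) // v1alpha1 list(string)
+//	rt, _ := ExprTypeToType(pb)                      // back to a CEL-native *Type
+//	rt.IsExactType(NewListType(StringType))          // true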
+
+func maybeWrapper(t *Type, pbType *exprpb.Type) *exprpb.Type {
+ if t.IsAssignableType(NullType) {
+ return chkdecls.NewWrapperType(pbType)
+ }
+ return pbType
+}
+
+func maybeForeignType(t ref.Type) *Type {
+ if celType, ok := t.(*Type); ok {
+ return celType
+ }
+ // Inspect the incoming type to determine its traits. The assumption will be that the incoming
+ // type does not have any field values; however, if the trait mask indicates that field testing
+ // and indexing are supported, the foreign type is marked as a struct.
+ traitMask := 0
+ for _, trait := range allTraits {
+ if t.HasTrait(trait) {
+ traitMask |= trait
+ }
+ }
+ // Treat the value like a struct. If it has no fields, denoting it as a struct is harmless
+ // since by convention it effectively becomes an opaque type.
+ return NewObjectType(t.TypeName(), traitMask)
+}
+
+func convertProto(src, dst proto.Message) error {
+ pb, err := proto.Marshal(src)
+ if err != nil {
+ return err
+ }
+ err = proto.Unmarshal(pb, dst)
+ return err
+}
+
+func primitiveType(primitive celpb.Type_PrimitiveType) *celpb.Type {
+ return &celpb.Type{
+ TypeKind: &celpb.Type_Primitive{
+ Primitive: primitive,
+ },
+ }
+}
+
+var (
+ checkedWellKnowns = map[string]*Type{
+ // Wrapper types.
+ "google.protobuf.BoolValue": NewNullableType(BoolType),
+ "google.protobuf.BytesValue": NewNullableType(BytesType),
+ "google.protobuf.DoubleValue": NewNullableType(DoubleType),
+ "google.protobuf.FloatValue": NewNullableType(DoubleType),
+ "google.protobuf.Int64Value": NewNullableType(IntType),
+ "google.protobuf.Int32Value": NewNullableType(IntType),
+ "google.protobuf.UInt64Value": NewNullableType(UintType),
+ "google.protobuf.UInt32Value": NewNullableType(UintType),
+ "google.protobuf.StringValue": NewNullableType(StringType),
+ // Well-known types.
+ "google.protobuf.Any": AnyType,
+ "google.protobuf.Duration": DurationType,
+ "google.protobuf.Timestamp": TimestampType,
+ // JSON types.
+ "google.protobuf.ListValue": NewListType(DynType),
+ "google.protobuf.NullValue": NullType,
+ "google.protobuf.Struct": NewMapType(StringType, DynType),
+ "google.protobuf.Value": DynType,
+ }
+
+ emptyParams = []*Type{}
+
+ allTraits = []int{
+ traits.AdderType,
+ traits.ComparerType,
+ traits.ContainerType,
+ traits.DividerType,
+ traits.FieldTesterType,
+ traits.IndexerType,
+ traits.IterableType,
+ traits.IteratorType,
+ traits.MatcherType,
+ traits.ModderType,
+ traits.MultiplierType,
+ traits.NegatorType,
+ traits.ReceiverType,
+ traits.SizerType,
+ traits.SubtractorType,
+ }
+
+ structTypeTraitMask = traits.FieldTesterType | traits.IndexerType
+
+ boolType = primitiveType(celpb.Type_BOOL)
+ bytesType = primitiveType(celpb.Type_BYTES)
+ doubleType = primitiveType(celpb.Type_DOUBLE)
+ intType = primitiveType(celpb.Type_INT64)
+ stringType = primitiveType(celpb.Type_STRING)
+ uintType = primitiveType(celpb.Type_UINT64)
+)
diff --git a/vendor/github.com/google/cel-go/common/types/uint.go b/vendor/github.com/google/cel-go/common/types/uint.go
new file mode 100644
index 000000000..6d74f30d8
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/uint.go
@@ -0,0 +1,256 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "math"
+ "reflect"
+ "strconv"
+
+ "github.com/google/cel-go/common/types/ref"
+
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
+)
+
+// Uint type implementation which supports comparison and math operators.
+type Uint uint64
+
+var (
+ uint32WrapperType = reflect.TypeOf(&wrapperspb.UInt32Value{})
+
+ uint64WrapperType = reflect.TypeOf(&wrapperspb.UInt64Value{})
+)
+
+// Uint constants
+const (
+ uintZero = Uint(0)
+)
+
+// Add implements traits.Adder.Add.
+func (i Uint) Add(other ref.Val) ref.Val {
+ otherUint, ok := other.(Uint)
+ if !ok {
+ return MaybeNoSuchOverloadErr(other)
+ }
+ val, err := addUint64Checked(uint64(i), uint64(otherUint))
+ if err != nil {
+ return WrapErr(err)
+ }
+ return Uint(val)
+}
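+
+// Illustrative sketch: arithmetic is overflow-checked, and failures surface as
+// CEL error values rather than Go panics (exact error text elided):
+//
+//	Uint(1).Add(Uint(2))              // Uint(3)
+//	Uint(math.MaxUint64).Add(Uint(1)) // error value: unsigned integer overflow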
+
+// Compare implements traits.Comparer.Compare.
+func (i Uint) Compare(other ref.Val) ref.Val {
+ switch ov := other.(type) {
+ case Double:
+ if math.IsNaN(float64(ov)) {
+ return NewErr("NaN values cannot be ordered")
+ }
+ return compareUintDouble(i, ov)
+ case Int:
+ return compareUintInt(i, ov)
+ case Uint:
+ return compareUint(i, ov)
+ default:
+ return MaybeNoSuchOverloadErr(other)
+ }
+}
+
+// ConvertToNative implements ref.Val.ConvertToNative.
+func (i Uint) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ switch typeDesc.Kind() {
+ case reflect.Uint, reflect.Uint32:
+ v, err := uint64ToUint32Checked(uint64(i))
+ if err != nil {
+ return 0, err
+ }
+ return reflect.ValueOf(v).Convert(typeDesc).Interface(), nil
+ case reflect.Uint8:
+ v, err := uint64ToUint8Checked(uint64(i))
+ if err != nil {
+ return 0, err
+ }
+ return reflect.ValueOf(v).Convert(typeDesc).Interface(), nil
+ case reflect.Uint16:
+ v, err := uint64ToUint16Checked(uint64(i))
+ if err != nil {
+ return 0, err
+ }
+ return reflect.ValueOf(v).Convert(typeDesc).Interface(), nil
+ case reflect.Uint64:
+ return reflect.ValueOf(i).Convert(typeDesc).Interface(), nil
+ case reflect.Ptr:
+ switch typeDesc {
+ case anyValueType:
+ // Primitives must be wrapped before being set on an Any field.
+ return anypb.New(wrapperspb.UInt64(uint64(i)))
+ case jsonValueType:
+ // JSON can accurately represent 32-bit uints as floating point values.
+ if i.isJSONSafe() {
+ return structpb.NewNumberValue(float64(i)), nil
+ }
+ // Proto3 to JSON conversion requires string-formatted uint64 values
+ // since the conversion to floating point would result in truncation.
+ return structpb.NewStringValue(strconv.FormatUint(uint64(i), 10)), nil
+ case uint32WrapperType:
+ // Convert the value to a wrapperspb.UInt32Value, error on overflow.
+ v, err := uint64ToUint32Checked(uint64(i))
+ if err != nil {
+ return 0, err
+ }
+ return wrapperspb.UInt32(v), nil
+ case uint64WrapperType:
+ // Convert the value to a wrapperspb.UInt64Value.
+ return wrapperspb.UInt64(uint64(i)), nil
+ }
+ switch typeDesc.Elem().Kind() {
+ case reflect.Uint32:
+ v, err := uint64ToUint32Checked(uint64(i))
+ if err != nil {
+ return 0, err
+ }
+ p := reflect.New(typeDesc.Elem())
+ p.Elem().Set(reflect.ValueOf(v).Convert(typeDesc.Elem()))
+ return p.Interface(), nil
+ case reflect.Uint64:
+ v := uint64(i)
+ p := reflect.New(typeDesc.Elem())
+ p.Elem().Set(reflect.ValueOf(v).Convert(typeDesc.Elem()))
+ return p.Interface(), nil
+ }
+ case reflect.Interface:
+ iv := i.Value()
+ if reflect.TypeOf(iv).Implements(typeDesc) {
+ return iv, nil
+ }
+ if reflect.TypeOf(i).Implements(typeDesc) {
+ return i, nil
+ }
+ }
+ return nil, fmt.Errorf("unsupported type conversion from 'uint' to %v", typeDesc)
+}
+
+// ConvertToType implements ref.Val.ConvertToType.
+func (i Uint) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case IntType:
+ v, err := uint64ToInt64Checked(uint64(i))
+ if err != nil {
+ return WrapErr(err)
+ }
+ return Int(v)
+ case UintType:
+ return i
+ case DoubleType:
+ return Double(i)
+ case StringType:
+ return String(fmt.Sprintf("%d", uint64(i)))
+ case TypeType:
+ return UintType
+ }
+ return NewErr("type conversion error from '%s' to '%s'", UintType, typeVal)
+}
+
+// Divide implements traits.Divider.Divide.
+func (i Uint) Divide(other ref.Val) ref.Val {
+ otherUint, ok := other.(Uint)
+ if !ok {
+ return MaybeNoSuchOverloadErr(other)
+ }
+ div, err := divideUint64Checked(uint64(i), uint64(otherUint))
+ if err != nil {
+ return WrapErr(err)
+ }
+ return Uint(div)
+}
+
+// Equal implements ref.Val.Equal.
+func (i Uint) Equal(other ref.Val) ref.Val {
+ switch ov := other.(type) {
+ case Double:
+ if math.IsNaN(float64(ov)) {
+ return False
+ }
+ return Bool(compareUintDouble(i, ov) == 0)
+ case Int:
+ return Bool(compareUintInt(i, ov) == 0)
+ case Uint:
+ return Bool(i == ov)
+ default:
+ return False
+ }
+}
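+
+// Illustrative sketch of heterogeneous numeric equality (there is no coercion
+// to non-numeric types):
+//
+//	Uint(1).Equal(Int(1))      // True
+//	Uint(1).Equal(Double(1))   // True
+//	Uint(1).Equal(String("1")) // False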
+
+// IsZeroValue returns true if the uint is zero.
+func (i Uint) IsZeroValue() bool {
+ return i == 0
+}
+
+// Modulo implements traits.Modder.Modulo.
+func (i Uint) Modulo(other ref.Val) ref.Val {
+ otherUint, ok := other.(Uint)
+ if !ok {
+ return MaybeNoSuchOverloadErr(other)
+ }
+ mod, err := moduloUint64Checked(uint64(i), uint64(otherUint))
+ if err != nil {
+ return WrapErr(err)
+ }
+ return Uint(mod)
+}
+
+// Multiply implements traits.Multiplier.Multiply.
+func (i Uint) Multiply(other ref.Val) ref.Val {
+ otherUint, ok := other.(Uint)
+ if !ok {
+ return MaybeNoSuchOverloadErr(other)
+ }
+ val, err := multiplyUint64Checked(uint64(i), uint64(otherUint))
+ if err != nil {
+ return WrapErr(err)
+ }
+ return Uint(val)
+}
+
+// Subtract implements traits.Subtractor.Subtract.
+func (i Uint) Subtract(subtrahend ref.Val) ref.Val {
+ subtraUint, ok := subtrahend.(Uint)
+ if !ok {
+ return MaybeNoSuchOverloadErr(subtrahend)
+ }
+ val, err := subtractUint64Checked(uint64(i), uint64(subtraUint))
+ if err != nil {
+ return WrapErr(err)
+ }
+ return Uint(val)
+}
+
+// Type implements ref.Val.Type.
+func (i Uint) Type() ref.Type {
+ return UintType
+}
+
+// Value implements ref.Val.Value.
+func (i Uint) Value() any {
+ return uint64(i)
+}
+
+// isJSONSafe indicates whether the uint is safely representable as a floating point value in JSON.
+func (i Uint) isJSONSafe() bool {
+ return i <= maxIntJSON
+}
diff --git a/vendor/github.com/google/cel-go/common/types/unknown.go b/vendor/github.com/google/cel-go/common/types/unknown.go
new file mode 100644
index 000000000..9dd2b2579
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/unknown.go
@@ -0,0 +1,326 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "math"
+ "reflect"
+ "sort"
+ "strings"
+ "unicode"
+
+ "github.com/google/cel-go/common/types/ref"
+)
+
+var (
+ unspecifiedAttribute = &AttributeTrail{qualifierPath: []any{}}
+)
+
+// NewAttributeTrail creates a new simple attribute from a variable name.
+func NewAttributeTrail(variable string) *AttributeTrail {
+ if variable == "" {
+ return unspecifiedAttribute
+ }
+ return &AttributeTrail{variable: variable}
+}
+
+// AttributeTrail specifies a variable with an optional qualifier path. An attribute value is expected to
+// correspond to an AbsoluteAttribute, meaning a field selection which starts with a top-level variable.
+//
+// The qualifier path elements adhere to the AttributeQualifier type constraint.
+type AttributeTrail struct {
+ variable string
+ qualifierPath []any
+}
+
+// Equal returns whether two attribute values have the same variable name and qualifier paths.
+func (a *AttributeTrail) Equal(other *AttributeTrail) bool {
+ if a.Variable() != other.Variable() || len(a.QualifierPath()) != len(other.QualifierPath()) {
+ return false
+ }
+ for i, q := range a.QualifierPath() {
+ qual := other.QualifierPath()[i]
+ if !qualifiersEqual(q, qual) {
+ return false
+ }
+ }
+ return true
+}
+
+func qualifiersEqual(a, b any) bool {
+ if a == b {
+ return true
+ }
+ switch numA := a.(type) {
+ case int64:
+ numB, ok := b.(uint64)
+ if !ok {
+ return false
+ }
+ return intUintEqual(numA, numB)
+ case uint64:
+ numB, ok := b.(int64)
+ if !ok {
+ return false
+ }
+ return intUintEqual(numB, numA)
+ default:
+ return false
+ }
+}
+
+func intUintEqual(i int64, u uint64) bool {
+ if i < 0 || u > math.MaxInt64 {
+ return false
+ }
+ return i == int64(u)
+}
+
+// Variable returns the variable name associated with the attribute.
+func (a *AttributeTrail) Variable() string {
+ return a.variable
+}
+
+// QualifierPath returns the optional set of qualifying fields or indices applied to the variable.
+func (a *AttributeTrail) QualifierPath() []any {
+ return a.qualifierPath
+}
+
+// String returns the string representation of the Attribute.
+func (a *AttributeTrail) String() string {
+ if a.variable == "" {
+ return ""
+ }
+ var str strings.Builder
+ str.WriteString(a.variable)
+ for _, q := range a.qualifierPath {
+ switch q := q.(type) {
+ case bool, int64:
+ str.WriteString(fmt.Sprintf("[%v]", q))
+ case uint64:
+ str.WriteString(fmt.Sprintf("[%vu]", q))
+ case string:
+ if isIdentifierCharacter(q) {
+ str.WriteString(fmt.Sprintf(".%v", q))
+ } else {
+ str.WriteString(fmt.Sprintf("[%q]", q))
+ }
+ }
+ }
+ return str.String()
+}
+
+func isIdentifierCharacter(str string) bool {
+ for _, c := range str {
+ if unicode.IsLetter(c) || unicode.IsDigit(c) || string(c) == "_" {
+ continue
+ }
+ return false
+ }
+ return true
+}
+
+// AttributeQualifier constrains the possible types which may be used to qualify an attribute.
+type AttributeQualifier interface {
+ bool | int64 | uint64 | string
+}
+
+// QualifyAttribute qualifies an attribute using a valid AttributeQualifier type.
+func QualifyAttribute[T AttributeQualifier](attr *AttributeTrail, qualifier T) *AttributeTrail {
+ attr.qualifierPath = append(attr.qualifierPath, qualifier)
+ return attr
+}
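+
+// Illustrative sketch: building a trail for the expression `a.b[0]` (the
+// variable and qualifiers are chosen for demonstration):
+//
+//	attr := NewAttributeTrail("a")
+//	attr = QualifyAttribute[string](attr, "b")
+//	attr = QualifyAttribute[int64](attr, 0)
+//	attr.String() // "a.b[0]"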
+
+// Unknown type which collects expression ids which caused the current value to become unknown.
+type Unknown struct {
+ attributeTrails map[int64][]*AttributeTrail
+}
+
+// NewUnknown creates a new unknown at a given expression id for an attribute.
+//
+// If the attribute is nil, the attribute value will be the `unspecifiedAttribute`.
+func NewUnknown(id int64, attr *AttributeTrail) *Unknown {
+ if attr == nil {
+ attr = unspecifiedAttribute
+ }
+ return &Unknown{
+ attributeTrails: map[int64][]*AttributeTrail{id: {attr}},
+ }
+}
+
+// IDs returns the set of unknown expression ids contained by this value.
+//
+// Numeric identifiers are guaranteed to be in sorted order.
+func (u *Unknown) IDs() []int64 {
+ ids := make(int64Slice, len(u.attributeTrails))
+ i := 0
+ for id := range u.attributeTrails {
+ ids[i] = id
+ i++
+ }
+ ids.Sort()
+ return ids
+}
+
+// GetAttributeTrails returns the attribute trails, if present, for the given expression id.
+func (u *Unknown) GetAttributeTrails(id int64) ([]*AttributeTrail, bool) {
+ trails, found := u.attributeTrails[id]
+ return trails, found
+}
+
+// Contains returns true if the input unknown is a subset of the current unknown.
+func (u *Unknown) Contains(other *Unknown) bool {
+ for id, otherTrails := range other.attributeTrails {
+ trails, found := u.attributeTrails[id]
+ if !found || len(otherTrails) != len(trails) {
+ return false
+ }
+ for _, ot := range otherTrails {
+ found := false
+ for _, t := range trails {
+ if t.Equal(ot) {
+ found = true
+ break
+ }
+ }
+ if !found {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// ConvertToNative implements ref.Val.ConvertToNative.
+func (u *Unknown) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ return u.Value(), nil
+}
+
+// ConvertToType is an identity function since unknown values cannot be modified.
+func (u *Unknown) ConvertToType(typeVal ref.Type) ref.Val {
+ return u
+}
+
+// Equal is an identity function since unknown values cannot be modified.
+func (u *Unknown) Equal(other ref.Val) ref.Val {
+ return u
+}
+
+// String implements the Stringer interface
+func (u *Unknown) String() string {
+ var str strings.Builder
+ for id, attrs := range u.attributeTrails {
+ if str.Len() != 0 {
+ str.WriteString(", ")
+ }
+ if len(attrs) == 1 {
+ str.WriteString(fmt.Sprintf("%v (%d)", attrs[0], id))
+ } else {
+ str.WriteString(fmt.Sprintf("%v (%d)", attrs, id))
+ }
+ }
+ return str.String()
+}
+
+// Type implements ref.Val.Type.
+func (u *Unknown) Type() ref.Type {
+ return UnknownType
+}
+
+// Value implements ref.Val.Value.
+func (u *Unknown) Value() any {
+ return u
+}
+
+// IsUnknown returns whether the element ref.Val is an instance of *types.Unknown.
+func IsUnknown(val ref.Val) bool {
+ switch val.(type) {
+ case *Unknown:
+ return true
+ default:
+ return false
+ }
+}
+
+// MaybeMergeUnknowns determines whether an input value and another, possibly nil, unknown will produce
+// an unknown result.
+//
+// If the input `val` is another Unknown, then the result will be the merge of the `val` and the input
+// `unk`. If the `val` is not unknown, then the result will depend on whether the input `unk` is nil.
+// If both values are non-nil and unknown, then the return value will be a merge of both unknowns.
+func MaybeMergeUnknowns(val ref.Val, unk *Unknown) (*Unknown, bool) {
+ src, isUnk := val.(*Unknown)
+ if !isUnk {
+ if unk != nil {
+ return unk, true
+ }
+ return unk, false
+ }
+ return MergeUnknowns(src, unk), true
+}
+
+// MergeUnknowns combines two unknown values into a new unknown value.
+func MergeUnknowns(unk1, unk2 *Unknown) *Unknown {
+ if unk1 == nil {
+ return unk2
+ }
+ if unk2 == nil {
+ return unk1
+ }
+ out := &Unknown{
+ attributeTrails: make(map[int64][]*AttributeTrail, len(unk1.attributeTrails)+len(unk2.attributeTrails)),
+ }
+ for id, ats := range unk1.attributeTrails {
+ out.attributeTrails[id] = ats
+ }
+ for id, ats := range unk2.attributeTrails {
+ existing, found := out.attributeTrails[id]
+ if !found {
+ out.attributeTrails[id] = ats
+ continue
+ }
+
+ for _, at := range ats {
+ found := false
+ for _, et := range existing {
+ if at.Equal(et) {
+ found = true
+ break
+ }
+ }
+ if !found {
+ existing = append(existing, at)
+ }
+ }
+ out.attributeTrails[id] = existing
+ }
+ return out
+}
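+
+// Illustrative sketch: merging two unknowns unions their expression ids and
+// deduplicates attribute trails per id:
+//
+//	u1 := NewUnknown(1, NewAttributeTrail("a"))
+//	u2 := NewUnknown(2, NewAttributeTrail("b"))
+//	MergeUnknowns(u1, u2).IDs() // [1 2], in sorted order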
+
+// int64Slice is an implementation of the sort.Interface
+type int64Slice []int64
+
+// Len returns the number of elements in the slice.
+func (x int64Slice) Len() int { return len(x) }
+
+// Less indicates whether the value at index i is less than the value at index j.
+func (x int64Slice) Less(i, j int) bool { return x[i] < x[j] }
+
+// Swap swaps the values at indices i and j in place.
+func (x int64Slice) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+// Sort is a convenience method: x.Sort() calls Sort(x).
+func (x int64Slice) Sort() { sort.Sort(x) }
diff --git a/vendor/github.com/google/cel-go/common/types/util.go b/vendor/github.com/google/cel-go/common/types/util.go
new file mode 100644
index 000000000..71662eee3
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/util.go
@@ -0,0 +1,48 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "github.com/google/cel-go/common/types/ref"
+)
+
+// IsUnknownOrError returns whether the input element ref.Val is an ErrType or UnknownType.
+func IsUnknownOrError(val ref.Val) bool {
+ switch val.(type) {
+ case *Unknown, *Err:
+ return true
+ }
+ return false
+}
+
+// IsPrimitiveType returns whether the input element ref.Val is a primitive type.
+// Note, primitive types do not include well-known types such as Duration and Timestamp.
+func IsPrimitiveType(val ref.Val) bool {
+ switch val.Type() {
+ case BoolType, BytesType, DoubleType, IntType, StringType, UintType:
+ return true
+ }
+ return false
+}
+
+// Equal returns whether the two ref.Value are heterogeneously equivalent.
+func Equal(lhs ref.Val, rhs ref.Val) ref.Val {
+ lNull := lhs == NullValue
+ rNull := rhs == NullValue
+ if lNull || rNull {
+ return Bool(lNull == rNull)
+ }
+ return lhs.Equal(rhs)
+}
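+
+// Illustrative sketch: null is only equal to null, while numeric values compare
+// heterogeneously (assuming the standard numeric Equal overloads defined
+// elsewhere in this package):
+//
+//	Equal(NullValue, NullValue) // True
+//	Equal(NullValue, Int(0))    // False
+//	Equal(Int(1), Uint(1))      // True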
diff --git a/vendor/github.com/google/cel-go/interpreter/BUILD.bazel b/vendor/github.com/google/cel-go/interpreter/BUILD.bazel
new file mode 100644
index 000000000..220e23d47
--- /dev/null
+++ b/vendor/github.com/google/cel-go/interpreter/BUILD.bazel
@@ -0,0 +1,74 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(
+ default_visibility = ["//visibility:public"],
+ licenses = ["notice"], # Apache 2.0
+)
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "activation.go",
+ "attribute_patterns.go",
+ "attributes.go",
+ "decorators.go",
+ "dispatcher.go",
+ "evalstate.go",
+ "interpretable.go",
+ "interpreter.go",
+ "optimizations.go",
+ "planner.go",
+ "prune.go",
+ "runtimecost.go",
+ ],
+ importpath = "github.com/google/cel-go/interpreter",
+ deps = [
+ "//common:go_default_library",
+ "//common/ast:go_default_library",
+ "//common/containers:go_default_library",
+ "//common/functions:go_default_library",
+ "//common/operators:go_default_library",
+ "//common/overloads:go_default_library",
+ "//common/types:go_default_library",
+ "//common/types/ref:go_default_library",
+ "//common/types/traits:go_default_library",
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
+ "@org_golang_google_protobuf//proto:go_default_library",
+ "@org_golang_google_protobuf//types/known/durationpb:go_default_library",
+ "@org_golang_google_protobuf//types/known/structpb:go_default_library",
+ "@org_golang_google_protobuf//types/known/timestamppb:go_default_library",
+ "@org_golang_google_protobuf//types/known/wrapperspb:go_default_library",
+ ],
+)
+
+go_test(
+ name = "go_default_test",
+ srcs = [
+ "activation_test.go",
+ "attribute_patterns_test.go",
+ "attributes_test.go",
+ "interpreter_test.go",
+ "prune_test.go",
+ "runtimecost_test.go",
+ ],
+ embed = [
+ ":go_default_library",
+ ],
+ deps = [
+ "//checker:go_default_library",
+ "//common/containers:go_default_library",
+ "//common/debug:go_default_library",
+ "//common/decls:go_default_library",
+ "//common/functions:go_default_library",
+ "//common/operators:go_default_library",
+ "//common/stdlib:go_default_library",
+ "//common/types:go_default_library",
+ "//parser:go_default_library",
+ "//test:go_default_library",
+ "//test/proto2pb:go_default_library",
+ "//test/proto3pb:go_default_library",
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
+ "@org_golang_google_protobuf//proto:go_default_library",
+ "@org_golang_google_protobuf//types/known/anypb:go_default_library",
+ ],
+)
diff --git a/vendor/github.com/google/cel-go/interpreter/activation.go b/vendor/github.com/google/cel-go/interpreter/activation.go
new file mode 100644
index 000000000..1577f3590
--- /dev/null
+++ b/vendor/github.com/google/cel-go/interpreter/activation.go
@@ -0,0 +1,168 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package interpreter
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/google/cel-go/common/types/ref"
+)
+
+// Activation used to resolve identifiers by name and references by id.
+//
+// An Activation is the primary mechanism by which a caller supplies input into a CEL program.
+type Activation interface {
+ // ResolveName returns a value from the activation by qualified name, or false if the name
+ // could not be found.
+ ResolveName(name string) (any, bool)
+
+ // Parent returns the parent of the current activation, may be nil.
+ // If non-nil, the parent will be searched during resolve calls.
+ Parent() Activation
+}
+
+// EmptyActivation returns a variable-free activation.
+func EmptyActivation() Activation {
+ return emptyActivation{}
+}
+
+// emptyActivation is a variable-free activation.
+type emptyActivation struct{}
+
+func (emptyActivation) ResolveName(string) (any, bool) { return nil, false }
+func (emptyActivation) Parent() Activation { return nil }
+
+// NewActivation returns an activation based on a map-based binding where the map keys are
+// expected to be qualified names used with ResolveName calls.
+//
+// The input `bindings` may either be of type `Activation` or `map[string]any`.
+//
+// Lazy bindings may be supplied within the map-based input in either of the following forms:
+// - func() any
+// - func() ref.Val
+//
+// The output of the lazy binding will overwrite the variable reference in the internal map.
+//
+// Values which are not represented as ref.Val types on input may be adapted to a ref.Val using
+// the types.Adapter configured in the environment.
+func NewActivation(bindings any) (Activation, error) {
+ if bindings == nil {
+ return nil, errors.New("bindings must be non-nil")
+ }
+ a, isActivation := bindings.(Activation)
+ if isActivation {
+ return a, nil
+ }
+ m, isMap := bindings.(map[string]any)
+ if !isMap {
+ return nil, fmt.Errorf(
+ "activation input must be an activation or map[string]interface: got %T",
+ bindings)
+ }
+ return &mapActivation{bindings: m}, nil
+}
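+
+// Illustrative sketch: a map-based activation with one eager and one lazy
+// binding; the lazy result is memoized on first resolution. computeCost is a
+// hypothetical helper used only for demonstration:
+//
+//	vars, _ := NewActivation(map[string]any{
+//	    "name": "cel",
+//	    "cost": func() any { return computeCost() }, // hypothetical
+//	})
+//	v, found := vars.ResolveName("name") // "cel", true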
+
+// mapActivation implements Activation using a map of named values.
+//
+// Named bindings may lazily supply values by providing a function which accepts no arguments and
+// produces an interface value.
+type mapActivation struct {
+ bindings map[string]any
+}
+
+// Parent implements the Activation interface method.
+func (a *mapActivation) Parent() Activation {
+ return nil
+}
+
+// ResolveName implements the Activation interface method.
+func (a *mapActivation) ResolveName(name string) (any, bool) {
+ obj, found := a.bindings[name]
+ if !found {
+ return nil, false
+ }
+ fn, isLazy := obj.(func() ref.Val)
+ if isLazy {
+ obj = fn()
+ a.bindings[name] = obj
+ }
+ fnRaw, isLazy := obj.(func() any)
+ if isLazy {
+ obj = fnRaw()
+ a.bindings[name] = obj
+ }
+ return obj, found
+}
+
+// hierarchicalActivation implements Activation and contains a parent and
+// child activation.
+type hierarchicalActivation struct {
+ parent Activation
+ child Activation
+}
+
+// Parent implements the Activation interface method.
+func (a *hierarchicalActivation) Parent() Activation {
+ return a.parent
+}
+
+// ResolveName implements the Activation interface method.
+func (a *hierarchicalActivation) ResolveName(name string) (any, bool) {
+ if object, found := a.child.ResolveName(name); found {
+ return object, found
+ }
+ return a.parent.ResolveName(name)
+}
+
+// NewHierarchicalActivation takes two activations and produces a new one which prioritizes
+// resolution in the child first and parent(s) second.
+func NewHierarchicalActivation(parent Activation, child Activation) Activation {
+ return &hierarchicalActivation{parent, child}
+}
+
+// NewPartialActivation returns an Activation which contains a list of AttributePattern values
+// representing field and index operations that should result in a 'types.Unknown' result.
+//
+// The `bindings` value may be any value type supported by the interpreter.NewActivation call,
+// but is typically either an existing Activation or map[string]any.
+func NewPartialActivation(bindings any,
+ unknowns ...*AttributePattern) (PartialActivation, error) {
+ a, err := NewActivation(bindings)
+ if err != nil {
+ return nil, err
+ }
+ return &partActivation{Activation: a, unknowns: unknowns}, nil
+}
+
+// PartialActivation extends the Activation interface with a set of UnknownAttributePatterns.
+type PartialActivation interface {
+ Activation
+
+ // UnknownAttributePatterns returns a set of AttributePattern values which match Attribute
+ // expressions for data accesses whose values are not yet known.
+ UnknownAttributePatterns() []*AttributePattern
+}
+
+// partActivation is the default implementations of the PartialActivation interface.
+type partActivation struct {
+ Activation
+ unknowns []*AttributePattern
+}
+
+// UnknownAttributePatterns implements the PartialActivation interface method.
+func (a *partActivation) UnknownAttributePatterns() []*AttributePattern {
+ return a.unknowns
+}
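+
+// Illustrative sketch: marking `b.c` as unknown while binding `a` (the
+// AttributePattern builder is defined in attribute_patterns.go):
+//
+//	vars, _ := NewPartialActivation(
+//	    map[string]any{"a": 1},
+//	    NewAttributePattern("b").QualString("c"),
+//	)
+//	_ = vars.UnknownAttributePatterns() // one pattern: b.c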
diff --git a/vendor/github.com/google/cel-go/interpreter/attribute_patterns.go b/vendor/github.com/google/cel-go/interpreter/attribute_patterns.go
new file mode 100644
index 000000000..8f19bde7e
--- /dev/null
+++ b/vendor/github.com/google/cel-go/interpreter/attribute_patterns.go
@@ -0,0 +1,397 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package interpreter
+
+import (
+ "fmt"
+
+ "github.com/google/cel-go/common/containers"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+)
+
+// AttributePattern represents a top-level variable with an optional set of qualifier patterns.
+//
+// When using a CEL expression within a container, e.g. a package or namespace, the variable name
+// in the pattern must match the qualified name produced during the variable namespace resolution.
+// For example, if variable `c` appears in an expression whose container is `a.b`, the variable
+// name supplied to the pattern must be `a.b.c`.
+//
+// The qualifier patterns for attribute matching must be one of the following:
+//
+// - valid map key type: string, int, uint, bool
+// - wildcard (*)
+//
+// Examples:
+//
+// 1. ns.myvar["complex-value"]
+// 2. ns.myvar["complex-value"][0]
+// 3. ns.myvar["complex-value"].*.name
+//
+// The first example is simple: match an attribute where the variable is 'ns.myvar' with a
+// field access on 'complex-value'. The second example expands the match to indicate that only
+// a specific index `0` should match. And lastly, the third example matches any indexed access
+// that later selects the 'name' field.
+type AttributePattern struct {
+ variable string
+ qualifierPatterns []*AttributeQualifierPattern
+}
+
+// NewAttributePattern produces a new mutable AttributePattern based on a variable name.
+func NewAttributePattern(variable string) *AttributePattern {
+ return &AttributePattern{
+ variable: variable,
+ qualifierPatterns: []*AttributeQualifierPattern{},
+ }
+}
+
+// QualString adds a string qualifier pattern to the AttributePattern. The string may be a valid
+// identifier or a string map key, including the empty string.
+func (apat *AttributePattern) QualString(pattern string) *AttributePattern {
+ apat.qualifierPatterns = append(apat.qualifierPatterns,
+ &AttributeQualifierPattern{value: pattern})
+ return apat
+}
+
+// QualInt adds an int qualifier pattern to the AttributePattern. The index may be either a map or
+// list index.
+func (apat *AttributePattern) QualInt(pattern int64) *AttributePattern {
+ apat.qualifierPatterns = append(apat.qualifierPatterns,
+ &AttributeQualifierPattern{value: pattern})
+ return apat
+}
+
+// QualUint adds a uint qualifier pattern for a map index operation to the AttributePattern.
+func (apat *AttributePattern) QualUint(pattern uint64) *AttributePattern {
+ apat.qualifierPatterns = append(apat.qualifierPatterns,
+ &AttributeQualifierPattern{value: pattern})
+ return apat
+}
+
+// QualBool adds a bool qualifier pattern for a map index operation to the AttributePattern.
+func (apat *AttributePattern) QualBool(pattern bool) *AttributePattern {
+ apat.qualifierPatterns = append(apat.qualifierPatterns,
+ &AttributeQualifierPattern{value: pattern})
+ return apat
+}
+
+// Wildcard adds a special sentinel qualifier pattern that will match any single qualifier.
+func (apat *AttributePattern) Wildcard() *AttributePattern {
+ apat.qualifierPatterns = append(apat.qualifierPatterns,
+ &AttributeQualifierPattern{wildcard: true})
+ return apat
+}
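+
+// Illustrative sketch: the third example from the AttributePattern doc,
+// `ns.myvar["complex-value"].*.name`, expressed with the builder:
+//
+//	pat := NewAttributePattern("ns.myvar").
+//	    QualString("complex-value").
+//	    Wildcard().
+//	    QualString("name")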
+
+// VariableMatches returns true if the fully qualified variable matches the AttributePattern
+// fully qualified variable name.
+func (apat *AttributePattern) VariableMatches(variable string) bool {
+ return apat.variable == variable
+}
+
+// QualifierPatterns returns the set of AttributeQualifierPattern values on the AttributePattern.
+func (apat *AttributePattern) QualifierPatterns() []*AttributeQualifierPattern {
+ return apat.qualifierPatterns
+}
+
+// AttributeQualifierPattern holds a wildcard or valued qualifier pattern.
+type AttributeQualifierPattern struct {
+ wildcard bool
+ value any
+}
+
+// Matches returns true if the qualifier pattern is a wildcard, or the Qualifier implements the
+// qualifierValueEquator interface and its QualifierValueEquals returns true for the qualifier pattern.
+func (qpat *AttributeQualifierPattern) Matches(q Qualifier) bool {
+ if qpat.wildcard {
+ return true
+ }
+ qve, ok := q.(qualifierValueEquator)
+ return ok && qve.QualifierValueEquals(qpat.value)
+}
+
+// qualifierValueEquator defines an interface for determining if an input value, of valid map key
+// type, is equal to the value held in the Qualifier. This interface is used by the
+// AttributeQualifierPattern to determine pattern matches for non-wildcard qualifier patterns.
+//
+// Note: Attribute values are also Qualifier values; however, Attributes are resolved before
+// qualification happens. This is an implementation detail, but one relevant to why the Attribute
+// types do not surface in the list of implementations.
+//
+// See: partialAttributeFactory.matchesUnknownPatterns for more details on how this interface is
+// used.
+type qualifierValueEquator interface {
+ // QualifierValueEquals returns true if the input value is equal to the value held in the
+ // Qualifier.
+ QualifierValueEquals(value any) bool
+}
+
+// QualifierValueEquals implementation for boolean qualifiers.
+func (q *boolQualifier) QualifierValueEquals(value any) bool {
+ bval, ok := value.(bool)
+ return ok && q.value == bval
+}
+
+// QualifierValueEquals implementation for field qualifiers.
+func (q *fieldQualifier) QualifierValueEquals(value any) bool {
+ sval, ok := value.(string)
+ return ok && q.Name == sval
+}
+
+// QualifierValueEquals implementation for string qualifiers.
+func (q *stringQualifier) QualifierValueEquals(value any) bool {
+ sval, ok := value.(string)
+ return ok && q.value == sval
+}
+
+// QualifierValueEquals implementation for int qualifiers.
+func (q *intQualifier) QualifierValueEquals(value any) bool {
+ return numericValueEquals(value, q.celValue)
+}
+
+// QualifierValueEquals implementation for uint qualifiers.
+func (q *uintQualifier) QualifierValueEquals(value any) bool {
+ return numericValueEquals(value, q.celValue)
+}
+
+// QualifierValueEquals implementation for double qualifiers.
+func (q *doubleQualifier) QualifierValueEquals(value any) bool {
+ return numericValueEquals(value, q.celValue)
+}
+
+// numericValueEquals uses CEL equality to determine whether two number values are equal.
+func numericValueEquals(value any, celValue ref.Val) bool {
+ val := types.DefaultTypeAdapter.NativeToValue(value)
+ return celValue.Equal(val) == types.True
+}
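For illustration, CEL equality is cross-type over numerics, so an int qualifier pattern can match a uint or double qualifier holding the same number. A minimal sketch of that behavior, using only the types package already imported above (an editorial example, not part of the vendored file):

// Sketch: cross-type numeric equality as exercised by numericValueEquals.
func numericEqualsSketch() bool {
	adapted := types.DefaultTypeAdapter.NativeToValue(uint64(42))
	// Double 42.0 equals uint 42 under CEL equality.
	return types.Double(42.0).Equal(adapted) == types.True
}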
+
+// NewPartialAttributeFactory returns an AttributeFactory implementation capable of performing
+// AttributePattern matches with PartialActivation inputs.
+func NewPartialAttributeFactory(container *containers.Container, adapter types.Adapter, provider types.Provider, opts ...AttrFactoryOption) AttributeFactory {
+ fac := NewAttributeFactory(container, adapter, provider, opts...)
+ return &partialAttributeFactory{
+ AttributeFactory: fac,
+ container: container,
+ adapter: adapter,
+ provider: provider,
+ }
+}
+
+type partialAttributeFactory struct {
+ AttributeFactory
+ container *containers.Container
+ adapter types.Adapter
+ provider types.Provider
+}
+
+// AbsoluteAttribute implementation of the AttributeFactory interface which wraps the
+// NamespacedAttribute resolution in an internal attributeMatcher object to dynamically match
+// unknown patterns from PartialActivation inputs if given.
+func (fac *partialAttributeFactory) AbsoluteAttribute(id int64, names ...string) NamespacedAttribute {
+ attr := fac.AttributeFactory.AbsoluteAttribute(id, names...)
+ return &attributeMatcher{fac: fac, NamespacedAttribute: attr}
+}
+
+// MaybeAttribute implementation of the AttributeFactory interface which ensures that the set of
+// 'maybe' NamespacedAttribute values are produced using the partialAttributeFactory rather than
+// the base AttributeFactory implementation.
+func (fac *partialAttributeFactory) MaybeAttribute(id int64, name string) Attribute {
+ return &maybeAttribute{
+ id: id,
+ attrs: []NamespacedAttribute{
+ fac.AbsoluteAttribute(id, fac.container.ResolveCandidateNames(name)...),
+ },
+ adapter: fac.adapter,
+ provider: fac.provider,
+ fac: fac,
+ }
+}
+
+// matchesUnknownPatterns returns a non-nil types.Unknown if the variable names and qualifiers for a
+// given Attribute value match any of the AttributePattern objects in the set of unknown activation
+// patterns on the given PartialActivation.
+//
+// For example, in the expression `a.b`, the Attribute is composed of variable `a`, with string
+// qualifier `b`. When a PartialActivation is supplied, it indicates that some or all of the data
+// provided in the input is unknown by specifying unknown AttributePatterns. An AttributePattern
+// that refers to variable `a` with a string qualifier of `c` will not match `a.b`; however, any
+// of the following patterns will match Attribute `a.b`:
+//
+// - `AttributePattern("a")`
+// - `AttributePattern("a").Wildcard()`
+// - `AttributePattern("a").QualString("b")`
+// - `AttributePattern("a").QualString("b").QualInt(0)`
+//
+// Any AttributePattern which overlaps an Attribute or vice-versa will produce an Unknown result
+// for the last pattern matched variable or qualifier in the Attribute. In the first matching
+// example, the expression id representing variable `a` would be listed in the Unknown result,
+// whereas in the other pattern examples, the qualifier `b` would be returned as the Unknown.
+func (fac *partialAttributeFactory) matchesUnknownPatterns(
+ vars PartialActivation,
+ attrID int64,
+ variableNames []string,
+ qualifiers []Qualifier) (*types.Unknown, error) {
+ patterns := vars.UnknownAttributePatterns()
+ candidateIndices := map[int]struct{}{}
+ for _, variable := range variableNames {
+ for i, pat := range patterns {
+ if pat.VariableMatches(variable) {
+ if len(qualifiers) == 0 {
+ return types.NewUnknown(attrID, types.NewAttributeTrail(variable)), nil
+ }
+ candidateIndices[i] = struct{}{}
+ }
+ }
+ }
+ // Determine whether to return early if there are no candidate unknown patterns.
+ if len(candidateIndices) == 0 {
+ return nil, nil
+ }
+ // Resolve the attribute qualifiers into a static set. This prevents more dynamic
+ // Attribute resolutions than necessary when there are multiple unknown patterns
+ // that traverse the same Attribute-based qualifier field.
+ newQuals := make([]Qualifier, len(qualifiers))
+ for i, qual := range qualifiers {
+ attr, isAttr := qual.(Attribute)
+ if isAttr {
+ val, err := attr.Resolve(vars)
+ if err != nil {
+ return nil, err
+ }
+ // If this resolution behavior ever changes, new implementations of the
+ // qualifierValueEquator may be required to handle proper resolution.
+ qual, err = fac.NewQualifier(nil, qual.ID(), val, attr.IsOptional())
+ if err != nil {
+ return nil, err
+ }
+ }
+ newQuals[i] = qual
+ }
+ // Determine whether any of the unknown patterns match.
+ for patIdx := range candidateIndices {
+ pat := patterns[patIdx]
+ isUnk := true
+ matchExprID := attrID
+ qualPats := pat.QualifierPatterns()
+ for i, qual := range newQuals {
+ if i >= len(qualPats) {
+ break
+ }
+ matchExprID = qual.ID()
+ qualPat := qualPats[i]
+ // Note, the AttributeQualifierPattern relies on the input Qualifier not being an
+ // Attribute, since there is no way to resolve the Attribute with the information
+ // provided to the Matches call.
+ if !qualPat.Matches(qual) {
+ isUnk = false
+ break
+ }
+ }
+ if isUnk {
+ attr := types.NewAttributeTrail(pat.variable)
+ for i := 0; i < len(qualPats) && i < len(newQuals); i++ {
+ if qual, ok := newQuals[i].(ConstantQualifier); ok {
+ switch v := qual.Value().Value().(type) {
+ case bool:
+ types.QualifyAttribute[bool](attr, v)
+ case float64:
+ types.QualifyAttribute[int64](attr, int64(v))
+ case int64:
+ types.QualifyAttribute[int64](attr, v)
+ case string:
+ types.QualifyAttribute[string](attr, v)
+ case uint64:
+ types.QualifyAttribute[uint64](attr, v)
+ default:
+ types.QualifyAttribute[string](attr, fmt.Sprintf("%v", v))
+ }
+ } else {
+ types.QualifyAttribute[string](attr, "*")
+ }
+ }
+ return types.NewUnknown(matchExprID, attr), nil
+ }
+ }
+ return nil, nil
+}
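To make the matching rules above concrete, here is a hedged sketch of declaring `a.b` unknown through a PartialActivation; the NewAttributePattern constructor and NewPartialActivation helper are assumed from this package:

// Sketch: mark `a.b` unknown; resolving `a.b` then yields types.Unknown while
// `a.c` still resolves normally.
func partialExampleSketch() (interpreter.PartialActivation, error) {
	unkPattern := interpreter.NewAttributePattern("a").QualString("b")
	return interpreter.NewPartialActivation(
		map[string]any{"a": map[string]any{"c": 1}},
		unkPattern,
	)
}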
+
+// attributeMatcher embeds the NamespacedAttribute interface which allows it to participate in
+// AttributePattern matching against Attribute values without having to modify the code paths that
+// identify Attributes in expressions.
+type attributeMatcher struct {
+ NamespacedAttribute
+ qualifiers []Qualifier
+ fac *partialAttributeFactory
+}
+
+// AddQualifier implements the Attribute interface method.
+func (m *attributeMatcher) AddQualifier(qual Qualifier) (Attribute, error) {
+ // Add the qualifier to the embedded NamespacedAttribute. If the input to the Resolve
+ // method is not a PartialActivation, or does not match an unknown attribute pattern, the
+ // Resolve method is directly invoked on the underlying NamespacedAttribute.
+ _, err := m.NamespacedAttribute.AddQualifier(qual)
+ if err != nil {
+ return nil, err
+ }
+ // The attributeMatcher overloads TryResolve and will attempt to match unknown patterns against
+ // the variable name and qualifier set contained within the Attribute. These values are not
+ // directly inspectable on the top-level NamespacedAttribute interface and so are tracked within
+ // the attributeMatcher.
+ m.qualifiers = append(m.qualifiers, qual)
+ return m, nil
+}
+
+// Resolve is an implementation of the NamespacedAttribute interface method which tests
+// for matching unknown attribute patterns and returns types.Unknown if present. Otherwise,
+// the standard Resolve logic applies.
+func (m *attributeMatcher) Resolve(vars Activation) (any, error) {
+ id := m.NamespacedAttribute.ID()
+ // Locate a PartialActivation, searching parent activations as needed (see toPartialActivation).
+ partial, isPartial := toPartialActivation(vars)
+ if isPartial {
+ unk, err := m.fac.matchesUnknownPatterns(
+ partial,
+ id,
+ m.CandidateVariableNames(),
+ m.qualifiers)
+ if err != nil {
+ return nil, err
+ }
+ if unk != nil {
+ return unk, nil
+ }
+ }
+ return m.NamespacedAttribute.Resolve(vars)
+}
+
+// Qualify is an implementation of the Qualifier interface method.
+func (m *attributeMatcher) Qualify(vars Activation, obj any) (any, error) {
+ return attrQualify(m.fac, vars, obj, m)
+}
+
+// QualifyIfPresent is an implementation of the Qualifier interface method.
+func (m *attributeMatcher) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+ return attrQualifyIfPresent(m.fac, vars, obj, m, presenceOnly)
+}
+
+func toPartialActivation(vars Activation) (PartialActivation, bool) {
+ pv, ok := vars.(PartialActivation)
+ if ok {
+ return pv, true
+ }
+ if vars.Parent() != nil {
+ return toPartialActivation(vars.Parent())
+ }
+ return nil, false
+}
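Since toPartialActivation walks parent scopes, a PartialActivation remains discoverable even when wrapped in a child scope. A small sketch, assuming this package's NewActivation and NewHierarchicalActivation helpers:

// Sketch: the partial parent is still found by toPartialActivation even though
// the outermost Activation is a plain hierarchical wrapper.
func wrapPartialSketch(partial interpreter.PartialActivation) interpreter.Activation {
	child, _ := interpreter.NewActivation(map[string]any{"x": 1})
	return interpreter.NewHierarchicalActivation(partial, child)
}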
diff --git a/vendor/github.com/google/cel-go/interpreter/attributes.go b/vendor/github.com/google/cel-go/interpreter/attributes.go
new file mode 100644
index 000000000..b1b3aacc8
--- /dev/null
+++ b/vendor/github.com/google/cel-go/interpreter/attributes.go
@@ -0,0 +1,1436 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package interpreter
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/google/cel-go/common/containers"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+)
+
+// AttributeFactory provides methods for creating Attribute and Qualifier values.
+type AttributeFactory interface {
+ // AbsoluteAttribute creates an attribute that refers to a top-level variable name.
+ //
+// Checked expressions generate absolute attributes with a single name.
+ // Parse-only expressions may have more than one possible absolute identifier when the
+ // expression is created within a container, e.g. package or namespace.
+ //
+ // When there is more than one name supplied to the AbsoluteAttribute call, the names
+ // must be in CEL's namespace resolution order. The name arguments provided here are
+ // returned in the same order as they were provided by the NamespacedAttribute
+ // CandidateVariableNames method.
+ AbsoluteAttribute(id int64, names ...string) NamespacedAttribute
+
+ // ConditionalAttribute creates an attribute with two Attribute branches, where the Attribute
+ // that is resolved depends on the boolean evaluation of the input 'expr'.
+ ConditionalAttribute(id int64, expr Interpretable, t, f Attribute) Attribute
+
+ // MaybeAttribute creates an attribute that refers to either a field selection or a namespaced
+ // variable name.
+ //
+ // Only expressions which have not been type-checked may generate oneof attributes.
+ MaybeAttribute(id int64, name string) Attribute
+
+ // RelativeAttribute creates an attribute whose value is a qualification of a dynamic
+ // computation rather than a static variable reference.
+ RelativeAttribute(id int64, operand Interpretable) Attribute
+
+ // NewQualifier creates a qualifier on the target object with a given value.
+ //
+ // The 'val' may be an Attribute or any proto-supported map key type: bool, int, string, uint.
+ //
+ // The qualifier may consider the object type being qualified, if present. If absent, the
+ // qualification should be considered dynamic and the qualification should still work, though
+ // it may be sub-optimal.
+ NewQualifier(objType *types.Type, qualID int64, val any, opt bool) (Qualifier, error)
+}
+
+// Qualifier marker interface for designating different qualifier values and where they appear
+// within field selections and index call expressions (`_[_]`).
+type Qualifier interface {
+ // ID where the qualifier appears within an expression.
+ ID() int64
+
+ // IsOptional specifies whether the qualifier is optional.
+ // Instead of a direct qualification, an optional qualifier will be resolved via QualifyIfPresent
+ // rather than Qualify. A non-optional qualifier may also be resolved through QualifyIfPresent if
+ // the object to qualify is itself optional.
+ IsOptional() bool
+
+ // Qualify performs a qualification, e.g. field selection, on the input object and returns
+ // the value of the access and whether the value was set. A non-nil value with a false presence
+ // test result indicates that the value being returned is the default value.
+ Qualify(vars Activation, obj any) (any, error)
+
+ // QualifyIfPresent qualifies the object if the qualifier is declared or defined on the object.
+ // The 'presenceOnly' flag indicates that the value is not necessary, just a boolean status as
+ // to whether the qualifier is present.
+ QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error)
+}
+
+// ConstantQualifier interface embeds the Qualifier interface and provides an option to inspect the
+// qualifier's constant value.
+//
+// Non-constant qualifiers are of Attribute type.
+type ConstantQualifier interface {
+ Qualifier
+
+ // Value returns the constant value associated with the qualifier.
+ Value() ref.Val
+}
+
+// Attribute values are a variable or value with an optional set of qualifiers, such as field, key,
+// or index accesses.
+type Attribute interface {
+ Qualifier
+
+ // AddQualifier adds a qualifier on the Attribute or error if the qualification is not a valid qualifier type.
+ AddQualifier(Qualifier) (Attribute, error)
+
+ // Resolve returns the value of the Attribute and whether it was present given an Activation.
+ // For objects which support safe traversal, the value may be non-nil and the presence flag be false.
+ //
+ // If an error is encountered during attribute resolution, it will be returned immediately.
+ // If the attribute cannot be resolved within the Activation, the result must be: `nil`, `error`
+ // with the error indicating which variable was missing.
+ Resolve(Activation) (any, error)
+}
+
+// NamespacedAttribute values are a variable within a namespace, and an optional set of qualifiers
+// such as field, key, or index accesses.
+type NamespacedAttribute interface {
+ Attribute
+
+ // CandidateVariableNames returns the possible namespaced variable names for this Attribute in
+ // the CEL namespace resolution order.
+ CandidateVariableNames() []string
+
+ // Qualifiers returns the list of qualifiers associated with the Attribute.
+ Qualifiers() []Qualifier
+}
+
+// AttrFactoryOption specifies a functional option for configuring an attribute factory.
+type AttrFactoryOption func(*attrFactory) *attrFactory
+
+// EnableErrorOnBadPresenceTest enables error generation when a presence test or optional field selection
+// is performed on a primitive type.
+func EnableErrorOnBadPresenceTest(value bool) AttrFactoryOption {
+ return func(fac *attrFactory) *attrFactory {
+ fac.errorOnBadPresenceTest = value
+ return fac
+ }
+}
+
+// NewAttributeFactory returns a default AttributeFactory which produces Attribute values
+// capable of resolving types by simple names and qualifying the values using the supported qualifier
+// types: bool, int, string, and uint.
+func NewAttributeFactory(cont *containers.Container, a types.Adapter, p types.Provider, opts ...AttrFactoryOption) AttributeFactory {
+ fac := &attrFactory{
+ container: cont,
+ adapter: a,
+ provider: p,
+ }
+ for _, o := range opts {
+ fac = o(fac)
+ }
+ return fac
+}
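A brief usage sketch of the factory constructor and the functional option above; it assumes types.NewEmptyRegistry, which satisfies both the Adapter and Provider arguments:

// Sketch: build a factory that errors on presence tests against primitives.
func newFactorySketch() interpreter.AttributeFactory {
	reg := types.NewEmptyRegistry()
	return interpreter.NewAttributeFactory(
		containers.DefaultContainer,
		reg, reg,
		interpreter.EnableErrorOnBadPresenceTest(true),
	)
}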
+
+type attrFactory struct {
+ container *containers.Container
+ adapter types.Adapter
+ provider types.Provider
+
+ errorOnBadPresenceTest bool
+}
+
+// AbsoluteAttribute refers to a variable value and an optional qualifier path.
+//
+// The namespaceNames represent the names the variable could have based on namespace
+// resolution rules.
+func (r *attrFactory) AbsoluteAttribute(id int64, names ...string) NamespacedAttribute {
+ return &absoluteAttribute{
+ id: id,
+ namespaceNames: names,
+ qualifiers: []Qualifier{},
+ adapter: r.adapter,
+ provider: r.provider,
+ fac: r,
+ errorOnBadPresenceTest: r.errorOnBadPresenceTest,
+ }
+}
+
+// ConditionalAttribute supports the case where an attribute selection may occur on a conditional
+// expression, e.g. (cond ? a : b).c
+func (r *attrFactory) ConditionalAttribute(id int64, expr Interpretable, t, f Attribute) Attribute {
+ return &conditionalAttribute{
+ id: id,
+ expr: expr,
+ truthy: t,
+ falsy: f,
+ adapter: r.adapter,
+ fac: r,
+ }
+}
+
+// MaybeAttribute collects variants of unchecked AbsoluteAttribute values which could either be
+// direct variable accesses or some combination of variable access with qualification.
+func (r *attrFactory) MaybeAttribute(id int64, name string) Attribute {
+ return &maybeAttribute{
+ id: id,
+ attrs: []NamespacedAttribute{
+ r.AbsoluteAttribute(id, r.container.ResolveCandidateNames(name)...),
+ },
+ adapter: r.adapter,
+ provider: r.provider,
+ fac: r,
+ }
+}
+
+// RelativeAttribute refers to an expression and an optional qualifier path.
+func (r *attrFactory) RelativeAttribute(id int64, operand Interpretable) Attribute {
+ return &relativeAttribute{
+ id: id,
+ operand: operand,
+ qualifiers: []Qualifier{},
+ adapter: r.adapter,
+ fac: r,
+ errorOnBadPresenceTest: r.errorOnBadPresenceTest,
+ }
+}
+
+// NewQualifier is an implementation of the AttributeFactory interface.
+func (r *attrFactory) NewQualifier(objType *types.Type, qualID int64, val any, opt bool) (Qualifier, error) {
+ // Before creating a new qualifier check to see if this is a protobuf message field access.
+ // If so, use the precomputed GetFrom qualification method rather than the standard
+ // stringQualifier.
+ str, isStr := val.(string)
+ if isStr && objType != nil && objType.Kind() == types.StructKind {
+ ft, found := r.provider.FindStructFieldType(objType.TypeName(), str)
+ if found && ft.IsSet != nil && ft.GetFrom != nil {
+ return &fieldQualifier{
+ id: qualID,
+ Name: str,
+ FieldType: ft,
+ adapter: r.adapter,
+ optional: opt,
+ }, nil
+ }
+ }
+ return newQualifier(r.adapter, qualID, val, opt, r.errorOnBadPresenceTest)
+}
+
+type absoluteAttribute struct {
+ id int64
+ // namespaceNames represent the names the variable could have based on declared container
+ // (package) of the expression.
+ namespaceNames []string
+ qualifiers []Qualifier
+ adapter types.Adapter
+ provider types.Provider
+ fac AttributeFactory
+
+ errorOnBadPresenceTest bool
+}
+
+// ID implements the Attribute interface method.
+func (a *absoluteAttribute) ID() int64 {
+ qualCount := len(a.qualifiers)
+ if qualCount == 0 {
+ return a.id
+ }
+ return a.qualifiers[qualCount-1].ID()
+}
+
+// IsOptional returns trivially false for an attribute as the attribute represents a fully
+// qualified variable name. If the attribute is used in an optional manner, then an attrQualifier
+// is created and marks the attribute as optional.
+func (a *absoluteAttribute) IsOptional() bool {
+ return false
+}
+
+// AddQualifier implements the Attribute interface method.
+func (a *absoluteAttribute) AddQualifier(qual Qualifier) (Attribute, error) {
+ a.qualifiers = append(a.qualifiers, qual)
+ return a, nil
+}
+
+// CandidateVariableNames implements the NamespacedAttribute interface method.
+func (a *absoluteAttribute) CandidateVariableNames() []string {
+ return a.namespaceNames
+}
+
+// Qualifiers returns the list of Qualifier instances associated with the namespaced attribute.
+func (a *absoluteAttribute) Qualifiers() []Qualifier {
+ return a.qualifiers
+}
+
+// Qualify is an implementation of the Qualifier interface method.
+func (a *absoluteAttribute) Qualify(vars Activation, obj any) (any, error) {
+ return attrQualify(a.fac, vars, obj, a)
+}
+
+// QualifyIfPresent is an implementation of the Qualifier interface method.
+func (a *absoluteAttribute) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+ return attrQualifyIfPresent(a.fac, vars, obj, a, presenceOnly)
+}
+
+// String implements the Stringer interface method.
+func (a *absoluteAttribute) String() string {
+ return fmt.Sprintf("id: %v, names: %v", a.id, a.namespaceNames)
+}
+
+// Resolve returns the resolved Attribute value given the Activation, or error if the Attribute
+// variable is not found, or if its Qualifiers cannot be applied successfully.
+//
+// If the variable name cannot be found as an Activation variable or in the TypeProvider as
+// a type, then the result is `nil`, `error` with the error listing the candidate variable
+// names that were searched and found missing.
+func (a *absoluteAttribute) Resolve(vars Activation) (any, error) {
+ for _, nm := range a.namespaceNames {
+ // If the variable is found, process it. Otherwise, wait until the checks to
+ // determine whether the type is unknown before returning.
+ obj, found := vars.ResolveName(nm)
+ if found {
+ if celErr, ok := obj.(*types.Err); ok {
+ return nil, celErr.Unwrap()
+ }
+ obj, isOpt, err := applyQualifiers(vars, obj, a.qualifiers)
+ if err != nil {
+ return nil, err
+ }
+ if isOpt {
+ val := a.adapter.NativeToValue(obj)
+ if types.IsUnknown(val) {
+ return val, nil
+ }
+ return types.OptionalOf(val), nil
+ }
+ return obj, nil
+ }
+ // Attempt to resolve the qualified type name if the name is not a variable identifier.
+ typ, found := a.provider.FindIdent(nm)
+ if found {
+ if len(a.qualifiers) == 0 {
+ return typ, nil
+ }
+ }
+ }
+ var attrNames strings.Builder
+ for i, nm := range a.namespaceNames {
+ if i != 0 {
+ attrNames.WriteString(", ")
+ }
+ attrNames.WriteString(nm)
+ }
+ return nil, missingAttribute(attrNames.String())
+}
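The candidate names are tried in the order given, so resolution falls through from the most to the least qualified name. A hedged sketch:

// Sketch: `ns.a` is absent from the activation, so the fallback name `a` wins;
// if neither were bound, Resolve would return a missing-attribute error.
func resolveOrderSketch(fac interpreter.AttributeFactory) (any, error) {
	attr := fac.AbsoluteAttribute(1, "ns.a", "a")
	vars, err := interpreter.NewActivation(map[string]any{"a": "fallback"})
	if err != nil {
		return nil, err
	}
	return attr.Resolve(vars)
}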
+
+type conditionalAttribute struct {
+ id int64
+ expr Interpretable
+ truthy Attribute
+ falsy Attribute
+ adapter types.Adapter
+ fac AttributeFactory
+}
+
+// ID is an implementation of the Attribute interface method.
+func (a *conditionalAttribute) ID() int64 {
+ // There's a field access after the conditional.
+ if a.truthy.ID() == a.falsy.ID() {
+ return a.truthy.ID()
+ }
+ // Otherwise return the conditional id as the consistent id being tracked.
+ return a.id
+}
+
+// IsOptional returns trivially false for an attribute as the attribute represents a fully
+// qualified variable name. If the attribute is used in an optional manner, then an attrQualifier
+// is created and marks the attribute as optional.
+func (a *conditionalAttribute) IsOptional() bool {
+ return false
+}
+
+// AddQualifier appends the same qualifier to both sides of the conditional, in effect managing
+// the qualification of alternate attributes.
+func (a *conditionalAttribute) AddQualifier(qual Qualifier) (Attribute, error) {
+ _, err := a.truthy.AddQualifier(qual)
+ if err != nil {
+ return nil, err
+ }
+ _, err = a.falsy.AddQualifier(qual)
+ if err != nil {
+ return nil, err
+ }
+ return a, nil
+}
+
+// Qualify is an implementation of the Qualifier interface method.
+func (a *conditionalAttribute) Qualify(vars Activation, obj any) (any, error) {
+ return attrQualify(a.fac, vars, obj, a)
+}
+
+// QualifyIfPresent is an implementation of the Qualifier interface method.
+func (a *conditionalAttribute) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+ return attrQualifyIfPresent(a.fac, vars, obj, a, presenceOnly)
+}
+
+// Resolve evaluates the condition, and then resolves the truthy or falsy branch accordingly.
+func (a *conditionalAttribute) Resolve(vars Activation) (any, error) {
+ val := a.expr.Eval(vars)
+ if val == types.True {
+ return a.truthy.Resolve(vars)
+ }
+ if val == types.False {
+ return a.falsy.Resolve(vars)
+ }
+ if types.IsUnknown(val) {
+ return val, nil
+ }
+ return nil, types.MaybeNoSuchOverloadErr(val).(*types.Err)
+}
+
+// String is an implementation of the Stringer interface method.
+func (a *conditionalAttribute) String() string {
+ return fmt.Sprintf("id: %v, truthy attribute: %v, falsy attribute: %v", a.id, a.truthy, a.falsy)
+}
+
+type maybeAttribute struct {
+ id int64
+ attrs []NamespacedAttribute
+ adapter types.Adapter
+ provider types.Provider
+ fac AttributeFactory
+}
+
+// ID is an implementation of the Attribute interface method.
+func (a *maybeAttribute) ID() int64 {
+ return a.attrs[0].ID()
+}
+
+// IsOptional returns trivially false for an attribute as the attribute represents a fully
+// qualified variable name. If the attribute is used in an optional manner, then an attrQualifier
+// is created and marks the attribute as optional.
+func (a *maybeAttribute) IsOptional() bool {
+ return false
+}
+
+// AddQualifier adds a qualifier to each possible attribute variant, and also creates
+// a new namespaced variable from the qualified value.
+//
+// The algorithm for building the maybe attribute is as follows:
+//
+// 1. Create a maybe attribute from a simple identifier when it occurs in a parsed-only expression
+//
+// mb = MaybeAttribute(<id>, "a")
+//
+// Initializing the maybe attribute creates an absolute attribute internally which includes the
+// possible namespaced names of the attribute. In this example, let's assume we are in namespace
+// 'ns', then the maybe is either one of the following variable names:
+//
+// possible variables names -- ns.a, a
+//
+// 2. Adding a qualifier to the maybe means that the variable name could be a longer qualified
+// name, or a field selection on one of the possible variable names produced earlier:
+//
+// mb.AddQualifier("b")
+//
+// possible variables names -- ns.a.b, a.b
+// possible field selection -- ns.a['b'], a['b']
+//
+// If none of the attributes within the maybe resolves a value, the result is an error.
+func (a *maybeAttribute) AddQualifier(qual Qualifier) (Attribute, error) {
+ str := ""
+ isStr := false
+ cq, isConst := qual.(ConstantQualifier)
+ if isConst {
+ str, isStr = cq.Value().Value().(string)
+ }
+ var augmentedNames []string
+ // First add the qualifier to all existing attributes in the oneof.
+ for _, attr := range a.attrs {
+ if isStr && len(attr.Qualifiers()) == 0 {
+ candidateVars := attr.CandidateVariableNames()
+ augmentedNames = make([]string, len(candidateVars))
+ for i, name := range candidateVars {
+ augmentedNames[i] = fmt.Sprintf("%s.%s", name, str)
+ }
+ }
+ _, err := attr.AddQualifier(qual)
+ if err != nil {
+ return nil, err
+ }
+ }
+ // Next, ensure the most specific variable / type reference is searched first.
+ if len(augmentedNames) != 0 {
+ a.attrs = append([]NamespacedAttribute{a.fac.AbsoluteAttribute(qual.ID(), augmentedNames...)}, a.attrs...)
+ }
+ return a, nil
+}
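A sketch of the algorithm described above, driven through the factory interface; the ids are arbitrary:

// Sketch: qualifying a maybe attribute with the constant string "b" prepends
// the longer candidate variables (e.g. ns.a.b, a.b) ahead of field selection.
func maybeQualifySketch(fac interpreter.AttributeFactory) (interpreter.Attribute, error) {
	mb := fac.MaybeAttribute(1, "a")
	q, err := fac.NewQualifier(nil, 2, "b", false)
	if err != nil {
		return nil, err
	}
	return mb.AddQualifier(q)
}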
+
+// Qualify is an implementation of the Qualifier interface method.
+func (a *maybeAttribute) Qualify(vars Activation, obj any) (any, error) {
+ return attrQualify(a.fac, vars, obj, a)
+}
+
+// QualifyIfPresent is an implementation of the Qualifier interface method.
+func (a *maybeAttribute) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+ return attrQualifyIfPresent(a.fac, vars, obj, a, presenceOnly)
+}
+
+// Resolve follows the variable resolution rules to determine whether the attribute is a variable
+// or a field selection.
+func (a *maybeAttribute) Resolve(vars Activation) (any, error) {
+ var maybeErr error
+ for _, attr := range a.attrs {
+ obj, err := attr.Resolve(vars)
+ // Return an error if one is encountered.
+ if err != nil {
+ resErr, ok := err.(*resolutionError)
+ if !ok {
+ return nil, err
+ }
+ // If this was not a missing variable error, return it.
+ if !resErr.isMissingAttribute() {
+ return nil, err
+ }
+ // When the variable is missing in a maybe attribute we defer erroring.
+ if maybeErr == nil {
+ maybeErr = resErr
+ }
+ // Continue attempting to resolve possible variables.
+ continue
+ }
+ return obj, nil
+ }
+ // Else, produce a no such attribute error.
+ return nil, maybeErr
+}
+
+// String is an implementation of the Stringer interface method.
+func (a *maybeAttribute) String() string {
+ return fmt.Sprintf("id: %v, attributes: %v", a.id, a.attrs)
+}
+
+type relativeAttribute struct {
+ id int64
+ operand Interpretable
+ qualifiers []Qualifier
+ adapter types.Adapter
+ fac AttributeFactory
+
+ errorOnBadPresenceTest bool
+}
+
+// ID is an implementation of the Attribute interface method.
+func (a *relativeAttribute) ID() int64 {
+ qualCount := len(a.qualifiers)
+ if qualCount == 0 {
+ return a.id
+ }
+ return a.qualifiers[qualCount-1].ID()
+}
+
+// IsOptional returns trivially false for an attribute as the attribute represents a fully
+// qualified variable name. If the attribute is used in an optional manner, then an attrQualifier
+// is created and marks the attribute as optional.
+func (a *relativeAttribute) IsOptional() bool {
+ return false
+}
+
+// AddQualifier implements the Attribute interface method.
+func (a *relativeAttribute) AddQualifier(qual Qualifier) (Attribute, error) {
+ a.qualifiers = append(a.qualifiers, qual)
+ return a, nil
+}
+
+// Qualify is an implementation of the Qualifier interface method.
+func (a *relativeAttribute) Qualify(vars Activation, obj any) (any, error) {
+ return attrQualify(a.fac, vars, obj, a)
+}
+
+// QualifyIfPresent is an implementation of the Qualifier interface method.
+func (a *relativeAttribute) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+ return attrQualifyIfPresent(a.fac, vars, obj, a, presenceOnly)
+}
+
+// Resolve evaluates the operand expression and applies the qualifiers relative to the expression result.
+func (a *relativeAttribute) Resolve(vars Activation) (any, error) {
+ // First, evaluate the operand.
+ v := a.operand.Eval(vars)
+ if types.IsError(v) {
+ return nil, v.(*types.Err)
+ }
+ if types.IsUnknown(v) {
+ return v, nil
+ }
+ obj, isOpt, err := applyQualifiers(vars, v, a.qualifiers)
+ if err != nil {
+ return nil, err
+ }
+ if isOpt {
+ val := a.adapter.NativeToValue(obj)
+ if types.IsUnknown(val) {
+ return val, nil
+ }
+ return types.OptionalOf(val), nil
+ }
+ return obj, nil
+}
+
+// String is an implementation of the Stringer interface method.
+func (a *relativeAttribute) String() string {
+ return fmt.Sprintf("id: %v, operand: %v", a.id, a.operand)
+}
+
+func newQualifier(adapter types.Adapter, id int64, v any, opt, errorOnBadPresenceTest bool) (Qualifier, error) {
+ var qual Qualifier
+ switch val := v.(type) {
+ case Attribute:
+ // Note, attributes are initially identified as non-optional since they represent a top-level
+ // field access; however, when used as a relative qualifier, e.g. a[?b.c], then an attrQualifier
+ // is created which intercepts the IsOptional check for the attribute in order to return the
+ // correct result.
+ return &attrQualifier{
+ id: id,
+ Attribute: val,
+ optional: opt,
+ }, nil
+ case string:
+ qual = &stringQualifier{
+ id: id,
+ value: val,
+ celValue: types.String(val),
+ adapter: adapter,
+ optional: opt,
+ errorOnBadPresenceTest: errorOnBadPresenceTest,
+ }
+ case int:
+ qual = &intQualifier{
+ id: id,
+ value: int64(val),
+ celValue: types.Int(val),
+ adapter: adapter,
+ optional: opt,
+ errorOnBadPresenceTest: errorOnBadPresenceTest,
+ }
+ case int32:
+ qual = &intQualifier{
+ id: id,
+ value: int64(val),
+ celValue: types.Int(val),
+ adapter: adapter,
+ optional: opt,
+ errorOnBadPresenceTest: errorOnBadPresenceTest,
+ }
+ case int64:
+ qual = &intQualifier{
+ id: id,
+ value: val,
+ celValue: types.Int(val),
+ adapter: adapter,
+ optional: opt,
+ errorOnBadPresenceTest: errorOnBadPresenceTest,
+ }
+ case uint:
+ qual = &uintQualifier{
+ id: id,
+ value: uint64(val),
+ celValue: types.Uint(val),
+ adapter: adapter,
+ optional: opt,
+ errorOnBadPresenceTest: errorOnBadPresenceTest,
+ }
+ case uint32:
+ qual = &uintQualifier{
+ id: id,
+ value: uint64(val),
+ celValue: types.Uint(val),
+ adapter: adapter,
+ optional: opt,
+ errorOnBadPresenceTest: errorOnBadPresenceTest,
+ }
+ case uint64:
+ qual = &uintQualifier{
+ id: id,
+ value: val,
+ celValue: types.Uint(val),
+ adapter: adapter,
+ optional: opt,
+ errorOnBadPresenceTest: errorOnBadPresenceTest,
+ }
+ case bool:
+ qual = &boolQualifier{
+ id: id,
+ value: val,
+ celValue: types.Bool(val),
+ adapter: adapter,
+ optional: opt,
+ errorOnBadPresenceTest: errorOnBadPresenceTest,
+ }
+ case float32:
+ qual = &doubleQualifier{
+ id: id,
+ value: float64(val),
+ celValue: types.Double(val),
+ adapter: adapter,
+ optional: opt,
+ errorOnBadPresenceTest: errorOnBadPresenceTest,
+ }
+ case float64:
+ qual = &doubleQualifier{
+ id: id,
+ value: val,
+ celValue: types.Double(val),
+ adapter: adapter,
+ optional: opt,
+ errorOnBadPresenceTest: errorOnBadPresenceTest,
+ }
+ case types.String:
+ qual = &stringQualifier{
+ id: id,
+ value: string(val),
+ celValue: val,
+ adapter: adapter,
+ optional: opt,
+ errorOnBadPresenceTest: errorOnBadPresenceTest,
+ }
+ case types.Int:
+ qual = &intQualifier{
+ id: id,
+ value: int64(val),
+ celValue: val,
+ adapter: adapter,
+ optional: opt,
+ errorOnBadPresenceTest: errorOnBadPresenceTest,
+ }
+ case types.Uint:
+ qual = &uintQualifier{
+ id: id,
+ value: uint64(val),
+ celValue: val,
+ adapter: adapter,
+ optional: opt,
+ errorOnBadPresenceTest: errorOnBadPresenceTest,
+ }
+ case types.Bool:
+ qual = &boolQualifier{
+ id: id,
+ value: bool(val),
+ celValue: val,
+ adapter: adapter,
+ optional: opt,
+ errorOnBadPresenceTest: errorOnBadPresenceTest,
+ }
+ case types.Double:
+ qual = &doubleQualifier{
+ id: id,
+ value: float64(val),
+ celValue: val,
+ adapter: adapter,
+ optional: opt,
+ errorOnBadPresenceTest: errorOnBadPresenceTest,
+ }
+ case *types.Unknown:
+ qual = &unknownQualifier{id: id, value: val}
+ default:
+ if q, ok := v.(Qualifier); ok {
+ return q, nil
+ }
+ return nil, fmt.Errorf("invalid qualifier type: %T", v)
+ }
+ return qual, nil
+}
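The switch above accepts native Go and CEL variants of the proto map key types plus doubles; anything else must already be a Qualifier. A short sketch via the factory:

// Sketch: supported qualifier values construct cleanly; a []byte value does
// not and reports "invalid qualifier type".
func qualifierKindsSketch(fac interpreter.AttributeFactory) error {
	for _, v := range []any{"name", int64(3), uint32(7), true, 2.5} {
		if _, err := fac.NewQualifier(nil, 10, v, false); err != nil {
			return err
		}
	}
	_, err := fac.NewQualifier(nil, 11, []byte("no"), false)
	return err // expected: invalid qualifier type: []uint8
}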
+
+type attrQualifier struct {
+ id int64
+ Attribute
+ optional bool
+}
+
+// ID implements the Qualifier interface method and returns the qualification instruction id
+// rather than the attribute id.
+func (q *attrQualifier) ID() int64 {
+ return q.id
+}
+
+// IsOptional implements the Qualifier interface method.
+func (q *attrQualifier) IsOptional() bool {
+ return q.optional
+}
+
+type stringQualifier struct {
+ id int64
+ value string
+ celValue ref.Val
+ adapter types.Adapter
+ optional bool
+ errorOnBadPresenceTest bool
+}
+
+// ID is an implementation of the Qualifier interface method.
+func (q *stringQualifier) ID() int64 {
+ return q.id
+}
+
+// IsOptional implements the Qualifier interface method.
+func (q *stringQualifier) IsOptional() bool {
+ return q.optional
+}
+
+// Qualify implements the Qualifier interface method.
+func (q *stringQualifier) Qualify(vars Activation, obj any) (any, error) {
+ val, _, err := q.qualifyInternal(vars, obj, false, false)
+ return val, err
+}
+
+// QualifyIfPresent is an implementation of the Qualifier interface method.
+func (q *stringQualifier) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+ return q.qualifyInternal(vars, obj, true, presenceOnly)
+}
+
+func (q *stringQualifier) qualifyInternal(vars Activation, obj any, presenceTest, presenceOnly bool) (any, bool, error) {
+ s := q.value
+ switch o := obj.(type) {
+ case map[string]any:
+ obj, isKey := o[s]
+ if isKey {
+ return obj, true, nil
+ }
+ case map[string]string:
+ obj, isKey := o[s]
+ if isKey {
+ return obj, true, nil
+ }
+ case map[string]int:
+ obj, isKey := o[s]
+ if isKey {
+ return obj, true, nil
+ }
+ case map[string]int32:
+ obj, isKey := o[s]
+ if isKey {
+ return obj, true, nil
+ }
+ case map[string]int64:
+ obj, isKey := o[s]
+ if isKey {
+ return obj, true, nil
+ }
+ case map[string]uint:
+ obj, isKey := o[s]
+ if isKey {
+ return obj, true, nil
+ }
+ case map[string]uint32:
+ obj, isKey := o[s]
+ if isKey {
+ return obj, true, nil
+ }
+ case map[string]uint64:
+ obj, isKey := o[s]
+ if isKey {
+ return obj, true, nil
+ }
+ case map[string]float32:
+ obj, isKey := o[s]
+ if isKey {
+ return obj, true, nil
+ }
+ case map[string]float64:
+ obj, isKey := o[s]
+ if isKey {
+ return obj, true, nil
+ }
+ case map[string]bool:
+ obj, isKey := o[s]
+ if isKey {
+ return obj, true, nil
+ }
+ default:
+ return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly, q.errorOnBadPresenceTest)
+ }
+ if presenceTest {
+ return nil, false, nil
+ }
+ return nil, false, missingKey(q.celValue)
+}
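The presence-test branch above means a missing map key is not an error when merely tested for. A hedged sketch of the observable difference:

// Sketch: QualifyIfPresent reports absence without error; Qualify errors.
func presenceSketch(fac interpreter.AttributeFactory, vars interpreter.Activation) (bool, error) {
	q, err := fac.NewQualifier(nil, 1, "missing", false)
	if err != nil {
		return false, err
	}
	obj := map[string]any{"present": 1}
	_, found, _ := q.QualifyIfPresent(vars, obj, true) // found == false, err == nil
	_, qErr := q.Qualify(vars, obj)                    // qErr: no such key: missing
	return found, qErr
}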
+
+// Value implements the ConstantQualifier interface
+func (q *stringQualifier) Value() ref.Val {
+ return q.celValue
+}
+
+type intQualifier struct {
+ id int64
+ value int64
+ celValue ref.Val
+ adapter types.Adapter
+ optional bool
+ errorOnBadPresenceTest bool
+}
+
+// ID is an implementation of the Qualifier interface method.
+func (q *intQualifier) ID() int64 {
+ return q.id
+}
+
+// IsOptional implements the Qualifier interface method.
+func (q *intQualifier) IsOptional() bool {
+ return q.optional
+}
+
+// Qualify implements the Qualifier interface method.
+func (q *intQualifier) Qualify(vars Activation, obj any) (any, error) {
+ val, _, err := q.qualifyInternal(vars, obj, false, false)
+ return val, err
+}
+
+// QualifyIfPresent is an implementation of the Qualifier interface method.
+func (q *intQualifier) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+ return q.qualifyInternal(vars, obj, true, presenceOnly)
+}
+
+func (q *intQualifier) qualifyInternal(vars Activation, obj any, presenceTest, presenceOnly bool) (any, bool, error) {
+ i := q.value
+ var isMap bool
+ switch o := obj.(type) {
+ // The specialized map types supported by an int qualifier are considerably fewer than the set
+ // of specialized map types supported by string qualifiers since they are less frequently used
+ // than string-based map keys. Additional specializations may be added in the future if
+ // desired.
+ case map[int]any:
+ isMap = true
+ obj, isKey := o[int(i)]
+ if isKey {
+ return obj, true, nil
+ }
+ case map[int32]any:
+ isMap = true
+ obj, isKey := o[int32(i)]
+ if isKey {
+ return obj, true, nil
+ }
+ case map[int64]any:
+ isMap = true
+ obj, isKey := o[i]
+ if isKey {
+ return obj, true, nil
+ }
+ case []any:
+ isIndex := i >= 0 && i < int64(len(o))
+ if isIndex {
+ return o[i], true, nil
+ }
+ case []string:
+ isIndex := i >= 0 && i < int64(len(o))
+ if isIndex {
+ return o[i], true, nil
+ }
+ case []int:
+ isIndex := i >= 0 && i < int64(len(o))
+ if isIndex {
+ return o[i], true, nil
+ }
+ case []int32:
+ isIndex := i >= 0 && i < int64(len(o))
+ if isIndex {
+ return o[i], true, nil
+ }
+ case []int64:
+ isIndex := i >= 0 && i < int64(len(o))
+ if isIndex {
+ return o[i], true, nil
+ }
+ case []uint:
+ isIndex := i >= 0 && i < int64(len(o))
+ if isIndex {
+ return o[i], true, nil
+ }
+ case []uint32:
+ isIndex := i >= 0 && i < int64(len(o))
+ if isIndex {
+ return o[i], true, nil
+ }
+ case []uint64:
+ isIndex := i >= 0 && i < int64(len(o))
+ if isIndex {
+ return o[i], true, nil
+ }
+ case []float32:
+ isIndex := i >= 0 && i < int64(len(o))
+ if isIndex {
+ return o[i], true, nil
+ }
+ case []float64:
+ isIndex := i >= 0 && i < int64(len(o))
+ if isIndex {
+ return o[i], true, nil
+ }
+ case []bool:
+ isIndex := i >= 0 && i < int64(len(o))
+ if isIndex {
+ return o[i], true, nil
+ }
+ default:
+ return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly, q.errorOnBadPresenceTest)
+ }
+ if presenceTest {
+ return nil, false, nil
+ }
+ if isMap {
+ return nil, false, missingKey(q.celValue)
+ }
+ return nil, false, missingIndex(q.celValue)
+}
+
+// Value implements the ConstantQualifier interface
+func (q *intQualifier) Value() ref.Val {
+ return q.celValue
+}
+
+type uintQualifier struct {
+ id int64
+ value uint64
+ celValue ref.Val
+ adapter types.Adapter
+ optional bool
+ errorOnBadPresenceTest bool
+}
+
+// ID is an implementation of the Qualifier interface method.
+func (q *uintQualifier) ID() int64 {
+ return q.id
+}
+
+// IsOptional implements the Qualifier interface method.
+func (q *uintQualifier) IsOptional() bool {
+ return q.optional
+}
+
+// Qualify implements the Qualifier interface method.
+func (q *uintQualifier) Qualify(vars Activation, obj any) (any, error) {
+ val, _, err := q.qualifyInternal(vars, obj, false, false)
+ return val, err
+}
+
+// QualifyIfPresent is an implementation of the Qualifier interface method.
+func (q *uintQualifier) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+ return q.qualifyInternal(vars, obj, true, presenceOnly)
+}
+
+func (q *uintQualifier) qualifyInternal(vars Activation, obj any, presenceTest, presenceOnly bool) (any, bool, error) {
+ u := q.value
+ switch o := obj.(type) {
+ // The specialized map types supported by a uint qualifier are considerably fewer than the set
+ // of specialized map types supported by string qualifiers since they are less frequently used
+ // than string-based map keys. Additional specializations may be added in the future if
+ // desired.
+ case map[uint]any:
+ obj, isKey := o[uint(u)]
+ if isKey {
+ return obj, true, nil
+ }
+ case map[uint32]any:
+ obj, isKey := o[uint32(u)]
+ if isKey {
+ return obj, true, nil
+ }
+ case map[uint64]any:
+ obj, isKey := o[u]
+ if isKey {
+ return obj, true, nil
+ }
+ default:
+ return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly, q.errorOnBadPresenceTest)
+ }
+ if presenceTest {
+ return nil, false, nil
+ }
+ return nil, false, missingKey(q.celValue)
+}
+
+// Value implements the ConstantQualifier interface
+func (q *uintQualifier) Value() ref.Val {
+ return q.celValue
+}
+
+type boolQualifier struct {
+ id int64
+ value bool
+ celValue ref.Val
+ adapter types.Adapter
+ optional bool
+ errorOnBadPresenceTest bool
+}
+
+// ID is an implementation of the Qualifier interface method.
+func (q *boolQualifier) ID() int64 {
+ return q.id
+}
+
+// IsOptional implements the Qualifier interface method.
+func (q *boolQualifier) IsOptional() bool {
+ return q.optional
+}
+
+// Qualify implements the Qualifier interface method.
+func (q *boolQualifier) Qualify(vars Activation, obj any) (any, error) {
+ val, _, err := q.qualifyInternal(vars, obj, false, false)
+ return val, err
+}
+
+// QualifyIfPresent is an implementation of the Qualifier interface method.
+func (q *boolQualifier) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+ return q.qualifyInternal(vars, obj, true, presenceOnly)
+}
+
+func (q *boolQualifier) qualifyInternal(vars Activation, obj any, presenceTest, presenceOnly bool) (any, bool, error) {
+ b := q.value
+ switch o := obj.(type) {
+ case map[bool]any:
+ obj, isKey := o[b]
+ if isKey {
+ return obj, true, nil
+ }
+ default:
+ return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly, q.errorOnBadPresenceTest)
+ }
+ if presenceTest {
+ return nil, false, nil
+ }
+ return nil, false, missingKey(q.celValue)
+}
+
+// Value implements the ConstantQualifier interface
+func (q *boolQualifier) Value() ref.Val {
+ return q.celValue
+}
+
+// fieldQualifier indicates that the qualification is a well-defined field with a known
+// field type. When the field type is known this can be used to improve the speed and
+// efficiency of field resolution.
+type fieldQualifier struct {
+ id int64
+ Name string
+ FieldType *types.FieldType
+ adapter types.Adapter
+ optional bool
+}
+
+// ID is an implementation of the Qualifier interface method.
+func (q *fieldQualifier) ID() int64 {
+ return q.id
+}
+
+// IsOptional implements the Qualifier interface method.
+func (q *fieldQualifier) IsOptional() bool {
+ return q.optional
+}
+
+// Qualify implements the Qualifier interface method.
+func (q *fieldQualifier) Qualify(vars Activation, obj any) (any, error) {
+ if rv, ok := obj.(ref.Val); ok {
+ obj = rv.Value()
+ }
+ val, err := q.FieldType.GetFrom(obj)
+ if err != nil {
+ return nil, err
+ }
+ return val, nil
+}
+
+// QualifyIfPresent is an implementation of the Qualifier interface method.
+func (q *fieldQualifier) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+ if rv, ok := obj.(ref.Val); ok {
+ obj = rv.Value()
+ }
+ if !q.FieldType.IsSet(obj) {
+ return nil, false, nil
+ }
+ if presenceOnly {
+ return nil, true, nil
+ }
+ val, err := q.FieldType.GetFrom(obj)
+ if err != nil {
+ return nil, false, err
+ }
+ return val, true, nil
+}
+
+// Value implements the ConstantQualifier interface
+func (q *fieldQualifier) Value() ref.Val {
+ return types.String(q.Name)
+}
+
+// doubleQualifier qualifies a CEL object, map, or list using a double value.
+//
+// This qualifier is used for working with dynamic data like JSON or protobuf.Any where the value
+// type may not be known ahead of time and may not conform to the standard types supported as valid
+// protobuf map key types.
+type doubleQualifier struct {
+ id int64
+ value float64
+ celValue ref.Val
+ adapter types.Adapter
+ optional bool
+ errorOnBadPresenceTest bool
+}
+
+// ID is an implementation of the Qualifier interface method.
+func (q *doubleQualifier) ID() int64 {
+ return q.id
+}
+
+// IsOptional implements the Qualifier interface method.
+func (q *doubleQualifier) IsOptional() bool {
+ return q.optional
+}
+
+// Qualify implements the Qualifier interface method.
+func (q *doubleQualifier) Qualify(vars Activation, obj any) (any, error) {
+ val, _, err := q.qualifyInternal(vars, obj, false, false)
+ return val, err
+}
+
+func (q *doubleQualifier) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+ return q.qualifyInternal(vars, obj, true, presenceOnly)
+}
+
+func (q *doubleQualifier) qualifyInternal(vars Activation, obj any, presenceTest, presenceOnly bool) (any, bool, error) {
+ return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly, q.errorOnBadPresenceTest)
+}
+
+// Value implements the ConstantQualifier interface
+func (q *doubleQualifier) Value() ref.Val {
+ return q.celValue
+}
+
+// unknownQualifier is a simple qualifier which always returns a preconfigured set of unknown values
+// for any value subject to qualification. This is consistent with CEL's unknown handling elsewhere.
+type unknownQualifier struct {
+ id int64
+ value *types.Unknown
+}
+
+// ID is an implementation of the Qualifier interface method.
+func (q *unknownQualifier) ID() int64 {
+ return q.id
+}
+
+// IsOptional returns trivially false as the unknown value is always returned.
+func (q *unknownQualifier) IsOptional() bool {
+ return false
+}
+
+// Qualify returns the unknown value associated with this qualifier.
+func (q *unknownQualifier) Qualify(vars Activation, obj any) (any, error) {
+ return q.value, nil
+}
+
+// QualifyIfPresent is an implementation of the Qualifier interface method.
+func (q *unknownQualifier) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+ return q.value, true, nil
+}
+
+// Value implements the ConstantQualifier interface
+func (q *unknownQualifier) Value() ref.Val {
+ return q.value
+}
+
+func applyQualifiers(vars Activation, obj any, qualifiers []Qualifier) (any, bool, error) {
+ optObj, isOpt := obj.(*types.Optional)
+ if isOpt {
+ if !optObj.HasValue() {
+ return optObj, false, nil
+ }
+ obj = optObj.GetValue().Value()
+ }
+
+ var err error
+ for _, qual := range qualifiers {
+ var qualObj any
+ isOpt = isOpt || qual.IsOptional()
+ if isOpt {
+ var present bool
+ qualObj, present, err = qual.QualifyIfPresent(vars, obj, false)
+ if err != nil {
+ return nil, false, err
+ }
+ if !present {
+ // We return optional none here with a presence of 'false' as the layers
+ // above will attempt to call types.OptionalOf() on a present value if any
+ // of the qualifiers is optional.
+ return types.OptionalNone, false, nil
+ }
+ } else {
+ qualObj, err = qual.Qualify(vars, obj)
+ if err != nil {
+ return nil, false, err
+ }
+ }
+ obj = qualObj
+ }
+ return obj, isOpt, nil
+}
+
+// attrQualify performs a qualification using the result of an attribute evaluation.
+func attrQualify(fac AttributeFactory, vars Activation, obj any, qualAttr Attribute) (any, error) {
+ val, err := qualAttr.Resolve(vars)
+ if err != nil {
+ return nil, err
+ }
+ qual, err := fac.NewQualifier(nil, qualAttr.ID(), val, qualAttr.IsOptional())
+ if err != nil {
+ return nil, err
+ }
+ return qual.Qualify(vars, obj)
+}
+
+// attrQualifyIfPresent conditionally performs the qualification when the attribute's resolved
+// value is present on the target object.
+func attrQualifyIfPresent(fac AttributeFactory, vars Activation, obj any, qualAttr Attribute,
+ presenceOnly bool) (any, bool, error) {
+ val, err := qualAttr.Resolve(vars)
+ if err != nil {
+ return nil, false, err
+ }
+ qual, err := fac.NewQualifier(nil, qualAttr.ID(), val, qualAttr.IsOptional())
+ if err != nil {
+ return nil, false, err
+ }
+ return qual.QualifyIfPresent(vars, obj, presenceOnly)
+}
+
+// refQualify attempts to convert the value to a CEL value and then uses reflection methods to try to
+// apply the qualifier, with the option to presence test field accesses before retrieving field values.
+func refQualify(adapter types.Adapter, obj any, idx ref.Val, presenceTest, presenceOnly, errorOnBadPresenceTest bool) (ref.Val, bool, error) {
+ celVal := adapter.NativeToValue(obj)
+ switch v := celVal.(type) {
+ case *types.Unknown:
+ return v, true, nil
+ case *types.Err:
+ return nil, false, v
+ case traits.Mapper:
+ val, found := v.Find(idx)
+ // If the index is of the wrong type for the map, then it is possible
+ // for the Find call to produce an error.
+ if types.IsError(val) {
+ return nil, false, val.(*types.Err)
+ }
+ if found {
+ return val, true, nil
+ }
+ if presenceTest {
+ return nil, false, nil
+ }
+ return nil, false, missingKey(idx)
+ case traits.Lister:
+ // If the index argument is not a valid numeric type, then it is possible
+ // for the index operation to produce an error.
+ i, err := types.IndexOrError(idx)
+ if err != nil {
+ return nil, false, err
+ }
+ celIndex := types.Int(i)
+ if i >= 0 && celIndex < v.Size().(types.Int) {
+ return v.Get(idx), true, nil
+ }
+ if presenceTest {
+ return nil, false, nil
+ }
+ return nil, false, missingIndex(idx)
+ case traits.Indexer:
+ if presenceTest {
+ ft, ok := v.(traits.FieldTester)
+ if ok {
+ presence := ft.IsSet(idx)
+ if types.IsError(presence) {
+ return nil, false, presence.(*types.Err)
+ }
+ // If not found or presence only test, then return.
+ // Otherwise, if found, obtain the value later on.
+ if presenceOnly || presence == types.False {
+ return nil, presence == types.True, nil
+ }
+ }
+ }
+ val := v.Get(idx)
+ if types.IsError(val) {
+ return nil, false, val.(*types.Err)
+ }
+ return val, true, nil
+ default:
+ if presenceTest && !errorOnBadPresenceTest {
+ return nil, false, nil
+ }
+ return nil, false, missingKey(idx)
+ }
+}
+
+// resolutionError is a custom error type which encodes the different error states which may
+// occur during attribute resolution.
+type resolutionError struct {
+ missingAttribute string
+ missingIndex ref.Val
+ missingKey ref.Val
+}
+
+func (e *resolutionError) isMissingAttribute() bool {
+ return e.missingAttribute != ""
+}
+
+func missingIndex(missing ref.Val) *resolutionError {
+ return &resolutionError{
+ missingIndex: missing,
+ }
+}
+
+func missingKey(missing ref.Val) *resolutionError {
+ return &resolutionError{
+ missingKey: missing,
+ }
+}
+
+func missingAttribute(attr string) *resolutionError {
+ return &resolutionError{
+ missingAttribute: attr,
+ }
+}
+
+// Error implements the error interface method.
+func (e *resolutionError) Error() string {
+ if e.missingKey != nil {
+ return fmt.Sprintf("no such key: %v", e.missingKey)
+ }
+ if e.missingIndex != nil {
+ return fmt.Sprintf("index out of bounds: %v", e.missingIndex)
+ }
+ if e.missingAttribute != "" {
+ return fmt.Sprintf("no such attribute(s): %s", e.missingAttribute)
+ }
+ return "invalid attribute"
+}
+
+// Is implements the errors.Is() method used by more recent versions of Go.
+func (e *resolutionError) Is(err error) bool {
+ return err.Error() == e.Error()
+}
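Because Is compares rendered messages, two resolution errors match under errors.Is exactly when they describe the same missing key, index, or attribute. An in-package sketch (assumes the standard errors import):

// Sketch: errors.Is matches resolutionError values with identical messages.
func sameResolutionErrSketch() bool {
	return errors.Is(missingKey(types.String("k")), missingKey(types.String("k"))) // true
}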
diff --git a/vendor/github.com/google/cel-go/interpreter/decorators.go b/vendor/github.com/google/cel-go/interpreter/decorators.go
new file mode 100644
index 000000000..502db35fc
--- /dev/null
+++ b/vendor/github.com/google/cel-go/interpreter/decorators.go
@@ -0,0 +1,272 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package interpreter
+
+import (
+ "github.com/google/cel-go/common/overloads"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+)
+
+// InterpretableDecorator is a functional interface for decorating or replacing
+// Interpretable expression nodes at construction time.
+type InterpretableDecorator func(Interpretable) (Interpretable, error)
+
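Any function with this shape can participate in planning. A hedged sketch of a user-defined decorator that counts planned nodes without altering them:

// Sketch: a pass-through decorator matching the functional interface above.
func decCountNodes(counter *int) InterpretableDecorator {
	return func(i Interpretable) (Interpretable, error) {
		*counter++ // observe, then return the node unchanged
		return i, nil
	}
}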
+// decObserveEval records evaluation state into an EvalState object.
+func decObserveEval(observer EvalObserver) InterpretableDecorator {
+ return func(i Interpretable) (Interpretable, error) {
+ switch inst := i.(type) {
+ case *evalWatch, *evalWatchAttr, *evalWatchConst, *evalWatchConstructor:
+ // these instructions are already watching; return straight away.
+ return i, nil
+ case InterpretableAttribute:
+ return &evalWatchAttr{
+ InterpretableAttribute: inst,
+ observer: observer,
+ }, nil
+ case InterpretableConst:
+ return &evalWatchConst{
+ InterpretableConst: inst,
+ observer: observer,
+ }, nil
+ case InterpretableConstructor:
+ return &evalWatchConstructor{
+ constructor: inst,
+ observer: observer,
+ }, nil
+ default:
+ return &evalWatch{
+ Interpretable: i,
+ observer: observer,
+ }, nil
+ }
+ }
+}
+
+// decInterruptFolds creates an interpretable decorator which marks comprehensions as interruptible
+// where the interrupt state is communicated via a hidden variable on the Activation.
+func decInterruptFolds() InterpretableDecorator {
+ return func(i Interpretable) (Interpretable, error) {
+ fold, ok := i.(*evalFold)
+ if !ok {
+ return i, nil
+ }
+ fold.interruptable = true
+ return fold, nil
+ }
+}
+
+// decDisableShortcircuits ensures that all branches of an expression will be evaluated, no short-circuiting.
+func decDisableShortcircuits() InterpretableDecorator {
+ return func(i Interpretable) (Interpretable, error) {
+ switch expr := i.(type) {
+ case *evalOr:
+ return &evalExhaustiveOr{
+ id: expr.id,
+ terms: expr.terms,
+ }, nil
+ case *evalAnd:
+ return &evalExhaustiveAnd{
+ id: expr.id,
+ terms: expr.terms,
+ }, nil
+ case *evalFold:
+ expr.exhaustive = true
+ return expr, nil
+ case InterpretableAttribute:
+ cond, isCond := expr.Attr().(*conditionalAttribute)
+ if isCond {
+ return &evalExhaustiveConditional{
+ id: cond.id,
+ attr: cond,
+ adapter: expr.Adapter(),
+ }, nil
+ }
+ }
+ return i, nil
+ }
+}
+
+// decOptimize optimizes the program plan by looking for common evaluation patterns and
+// conditionally precomputing the result.
+// - build list and map values with constant elements.
+// - convert 'in' operations to set membership tests if possible.
+func decOptimize() InterpretableDecorator {
+ return func(i Interpretable) (Interpretable, error) {
+ switch inst := i.(type) {
+ case *evalList:
+ return maybeBuildListLiteral(i, inst)
+ case *evalMap:
+ return maybeBuildMapLiteral(i, inst)
+ case InterpretableCall:
+ if inst.OverloadID() == overloads.InList {
+ return maybeOptimizeSetMembership(i, inst)
+ }
+ if overloads.IsTypeConversionFunction(inst.Function()) {
+ return maybeOptimizeConstUnary(i, inst)
+ }
+ }
+ return i, nil
+ }
+}
+
+// decRegexOptimizer compiles regex pattern string constants.
+func decRegexOptimizer(regexOptimizations ...*RegexOptimization) InterpretableDecorator {
+ functionMatchMap := make(map[string]*RegexOptimization)
+ overloadMatchMap := make(map[string]*RegexOptimization)
+ for _, m := range regexOptimizations {
+ functionMatchMap[m.Function] = m
+ if m.OverloadID != "" {
+ overloadMatchMap[m.OverloadID] = m
+ }
+ }
+
+ return func(i Interpretable) (Interpretable, error) {
+ call, ok := i.(InterpretableCall)
+ if !ok {
+ return i, nil
+ }
+
+ var matcher *RegexOptimization
+ var found bool
+ if call.OverloadID() != "" {
+ matcher, found = overloadMatchMap[call.OverloadID()]
+ }
+ if !found {
+ matcher, found = functionMatchMap[call.Function()]
+ }
+ if !found || matcher.RegexIndex >= len(call.Args()) {
+ return i, nil
+ }
+ args := call.Args()
+ regexArg := args[matcher.RegexIndex]
+ regexStr, isConst := regexArg.(InterpretableConst)
+ if !isConst {
+ return i, nil
+ }
+ pattern, ok := regexStr.Value().(types.String)
+ if !ok {
+ return i, nil
+ }
+ return matcher.Factory(call, string(pattern))
+ }
+}
+
+func maybeOptimizeConstUnary(i Interpretable, call InterpretableCall) (Interpretable, error) {
+ args := call.Args()
+ if len(args) != 1 {
+ return i, nil
+ }
+ _, isConst := args[0].(InterpretableConst)
+ if !isConst {
+ return i, nil
+ }
+ val := call.Eval(EmptyActivation())
+ if types.IsError(val) {
+ return nil, val.(*types.Err)
+ }
+ return NewConstValue(call.ID(), val), nil
+}
+
+func maybeBuildListLiteral(i Interpretable, l *evalList) (Interpretable, error) {
+ for _, elem := range l.elems {
+ _, isConst := elem.(InterpretableConst)
+ if !isConst {
+ return i, nil
+ }
+ }
+ return NewConstValue(l.ID(), l.Eval(EmptyActivation())), nil
+}
+
+func maybeBuildMapLiteral(i Interpretable, mp *evalMap) (Interpretable, error) {
+ for idx, key := range mp.keys {
+ _, isConst := key.(InterpretableConst)
+ if !isConst {
+ return i, nil
+ }
+ _, isConst = mp.vals[idx].(InterpretableConst)
+ if !isConst {
+ return i, nil
+ }
+ }
+ return NewConstValue(mp.ID(), mp.Eval(EmptyActivation())), nil
+}
+
+// maybeOptimizeSetMembership may convert an 'in' operation against a list to map key membership
+// test if the following conditions are true:
+// - the list is a constant with homogeneous element types.
+// - the elements are all of primitive type.
+func maybeOptimizeSetMembership(i Interpretable, inlist InterpretableCall) (Interpretable, error) {
+ args := inlist.Args()
+ lhs := args[0]
+ rhs := args[1]
+ l, isConst := rhs.(InterpretableConst)
+ if !isConst {
+ return i, nil
+ }
+	// When the incoming binary call is flagged as the InList overload, the value will
+ // always be convertible to a `traits.Lister` type.
+ list := l.Value().(traits.Lister)
+ if list.Size() == types.IntZero {
+ return NewConstValue(inlist.ID(), types.False), nil
+ }
+ it := list.Iterator()
+ valueSet := make(map[ref.Val]ref.Val)
+ for it.HasNext() == types.True {
+ elem := it.Next()
+ if !types.IsPrimitiveType(elem) || elem.Type() == types.BytesType {
+			// Note, non-primitive types are not yet supported, and []byte isn't hashable.
+ return i, nil
+ }
+ valueSet[elem] = types.True
+ switch ev := elem.(type) {
+ case types.Double:
+ iv := ev.ConvertToType(types.IntType)
+ // Ensure that only lossless conversions are added to the set
+ if !types.IsError(iv) && iv.Equal(ev) == types.True {
+ valueSet[iv] = types.True
+ }
+ // Ensure that only lossless conversions are added to the set
+ uv := ev.ConvertToType(types.UintType)
+ if !types.IsError(uv) && uv.Equal(ev) == types.True {
+ valueSet[uv] = types.True
+ }
+ case types.Int:
+ dv := ev.ConvertToType(types.DoubleType)
+ if !types.IsError(dv) {
+ valueSet[dv] = types.True
+ }
+ uv := ev.ConvertToType(types.UintType)
+ if !types.IsError(uv) {
+ valueSet[uv] = types.True
+ }
+ case types.Uint:
+ dv := ev.ConvertToType(types.DoubleType)
+ if !types.IsError(dv) {
+ valueSet[dv] = types.True
+ }
+ iv := ev.ConvertToType(types.IntType)
+ if !types.IsError(iv) {
+ valueSet[iv] = types.True
+ }
+ }
+ }
+ return &evalSetMembership{
+ inst: inlist,
+ arg: lhs,
+ valueSet: valueSet,
+ }, nil
+}
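
The set-membership rewrite above is the most involved of these decorators: a constant list operand of `in` is flattened into a map whose keys include every lossless numeric representation of each element, so `1.0 in [1, 2]` and `1u in [1, 2]` hit the same entry as `1 in [1, 2]`. A minimal plain-Go sketch of the idea (illustrative only, not the cel-go internals):

package main

import "fmt"

// buildIntSet mirrors the idea behind maybeOptimizeSetMembership: index each
// constant element under every lossless numeric representation so that int,
// uint, and double forms of the same number all resolve to one set entry.
func buildIntSet(elems []int64) map[any]bool {
	set := make(map[any]bool, len(elems)*3)
	for _, e := range elems {
		set[e] = true          // int representation
		set[uint64(e)] = true  // uint representation
		set[float64(e)] = true // double representation (lossless for small ints)
	}
	return set
}

func main() {
	set := buildIntSet([]int64{1, 2, 3})
	fmt.Println(set[float64(1)]) // true: 1.0 in [1, 2, 3]
	fmt.Println(set[uint64(4)])  // false: 4u in [1, 2, 3]
}
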
diff --git a/vendor/github.com/google/cel-go/interpreter/dispatcher.go b/vendor/github.com/google/cel-go/interpreter/dispatcher.go
new file mode 100644
index 000000000..8f0bdb7b8
--- /dev/null
+++ b/vendor/github.com/google/cel-go/interpreter/dispatcher.go
@@ -0,0 +1,100 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package interpreter
+
+import (
+ "fmt"
+
+ "github.com/google/cel-go/common/functions"
+)
+
+// Dispatcher resolves function calls to their appropriate overload.
+type Dispatcher interface {
+	// Add one or more overloads, returning an error if any overload has the same operator name.
+ Add(overloads ...*functions.Overload) error
+
+ // FindOverload returns an Overload definition matching the provided name.
+ FindOverload(overload string) (*functions.Overload, bool)
+
+ // OverloadIds returns the set of all overload identifiers configured for dispatch.
+ OverloadIds() []string
+}
+
+// NewDispatcher returns an empty Dispatcher instance.
+func NewDispatcher() Dispatcher {
+ return &defaultDispatcher{
+ overloads: make(map[string]*functions.Overload)}
+}
+
+// ExtendDispatcher returns a Dispatcher which inherits the overloads of its parent, and
+// provides an isolation layer between built-ins and extension functions which is useful
+// for forward compatibility.
+func ExtendDispatcher(parent Dispatcher) Dispatcher {
+ return &defaultDispatcher{
+ parent: parent,
+ overloads: make(map[string]*functions.Overload)}
+}
+
+// overloadMap helper type for indexing overloads by function name.
+type overloadMap map[string]*functions.Overload
+
+// defaultDispatcher struct which contains an overload map.
+type defaultDispatcher struct {
+ parent Dispatcher
+ overloads overloadMap
+}
+
+// Add implements the Dispatcher.Add interface method.
+func (d *defaultDispatcher) Add(overloads ...*functions.Overload) error {
+ for _, o := range overloads {
+ // add the overload unless an overload of the same name has already been provided.
+ if _, found := d.overloads[o.Operator]; found {
+ return fmt.Errorf("overload already exists '%s'", o.Operator)
+ }
+ // index the overload by function name.
+ d.overloads[o.Operator] = o
+ }
+ return nil
+}
+
+// FindOverload implements the Dispatcher.FindOverload interface method.
+func (d *defaultDispatcher) FindOverload(overload string) (*functions.Overload, bool) {
+ o, found := d.overloads[overload]
+ // Attempt to dispatch to an overload defined in the parent.
+ if !found && d.parent != nil {
+ return d.parent.FindOverload(overload)
+ }
+ return o, found
+}
+
+// OverloadIds implements the Dispatcher interface method.
+func (d *defaultDispatcher) OverloadIds() []string {
+ i := 0
+ overloads := make([]string, len(d.overloads))
+ for name := range d.overloads {
+ overloads[i] = name
+ i++
+ }
+ if d.parent == nil {
+ return overloads
+ }
+ parentOverloads := d.parent.OverloadIds()
+ for _, pName := range parentOverloads {
+ if _, found := d.overloads[pName]; !found {
+ overloads = append(overloads, pName)
+ }
+ }
+ return overloads
+}
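
Dispatcher and ExtendDispatcher are exported, so the registration flow can be sketched directly. A minimal sketch; the `shout_string` overload below is hypothetical, assuming the `functions.Overload` fields from the package imported above:

package main

import (
	"fmt"

	"github.com/google/cel-go/common/functions"
	"github.com/google/cel-go/common/types"
	"github.com/google/cel-go/common/types/ref"
	"github.com/google/cel-go/interpreter"
)

func main() {
	base := interpreter.NewDispatcher()
	// Register a unary overload; a second Add with the same operator would error.
	err := base.Add(&functions.Overload{
		Operator: "shout_string",
		Unary: func(arg ref.Val) ref.Val {
			s, ok := arg.(types.String)
			if !ok {
				return types.MaybeNoSuchOverloadErr(arg)
			}
			return s + "!"
		},
	})
	if err != nil {
		panic(err)
	}
	// Extension dispatchers resolve through the parent without mutating it.
	ext := interpreter.ExtendDispatcher(base)
	if o, found := ext.FindOverload("shout_string"); found {
		fmt.Println("resolved", o.Operator, "via parent")
	}
}
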
diff --git a/vendor/github.com/google/cel-go/interpreter/evalstate.go b/vendor/github.com/google/cel-go/interpreter/evalstate.go
new file mode 100644
index 000000000..4bdd1fdc7
--- /dev/null
+++ b/vendor/github.com/google/cel-go/interpreter/evalstate.go
@@ -0,0 +1,79 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package interpreter
+
+import (
+ "github.com/google/cel-go/common/types/ref"
+)
+
+// EvalState tracks the values associated with expression ids during execution.
+type EvalState interface {
+ // IDs returns the list of ids with recorded values.
+ IDs() []int64
+
+	// Value returns the observed value of the given expression id if found, and a nil, false
+	// result if not.
+ Value(int64) (ref.Val, bool)
+
+ // SetValue sets the observed value of the expression id.
+ SetValue(int64, ref.Val)
+
+ // Reset clears the previously recorded expression values.
+ Reset()
+}
+
+// evalState permits the mutation of evaluation state for a given expression id.
+type evalState struct {
+ values map[int64]ref.Val
+}
+
+// NewEvalState returns an EvalState instance used to observe the intermediate
+// evaluations of an expression.
+func NewEvalState() EvalState {
+ return &evalState{
+ values: make(map[int64]ref.Val),
+ }
+}
+
+// IDs implements the EvalState interface method.
+func (s *evalState) IDs() []int64 {
+ var ids []int64
+ for k, v := range s.values {
+ if v != nil {
+ ids = append(ids, k)
+ }
+ }
+ return ids
+}
+
+// Value is an implementation of the EvalState interface method.
+func (s *evalState) Value(exprID int64) (ref.Val, bool) {
+ val, found := s.values[exprID]
+ return val, found
+}
+
+// SetValue is an implementation of the EvalState interface method.
+func (s *evalState) SetValue(exprID int64, val ref.Val) {
+ if val == nil {
+ delete(s.values, exprID)
+ } else {
+ s.values[exprID] = val
+ }
+}
+
+// Reset implements the EvalState interface method.
+func (s *evalState) Reset() {
+ s.values = map[int64]ref.Val{}
+}
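
The EvalState lifecycle is small enough to demonstrate end to end; a minimal sketch using only the exported API above:

package main

import (
	"fmt"

	"github.com/google/cel-go/common/types"
	"github.com/google/cel-go/interpreter"
)

func main() {
	state := interpreter.NewEvalState()
	state.SetValue(1, types.Int(42))
	state.SetValue(2, types.String("hello"))

	if v, found := state.Value(1); found {
		fmt.Println("expr 1 evaluated to", v) // 42
	}
	fmt.Println("recorded ids:", state.IDs()) // [1 2], in map iteration order

	// A nil value deletes the entry; Reset clears everything.
	state.SetValue(2, nil)
	state.Reset()
	fmt.Println("ids after reset:", len(state.IDs())) // 0
}
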
diff --git a/vendor/github.com/google/cel-go/interpreter/interpretable.go b/vendor/github.com/google/cel-go/interpreter/interpretable.go
new file mode 100644
index 000000000..ebc432e9d
--- /dev/null
+++ b/vendor/github.com/google/cel-go/interpreter/interpretable.go
@@ -0,0 +1,1407 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package interpreter
+
+import (
+ "fmt"
+ "sync"
+
+ "github.com/google/cel-go/common/functions"
+ "github.com/google/cel-go/common/operators"
+ "github.com/google/cel-go/common/overloads"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+)
+
+// Interpretable can accept a given Activation and produce a value along with
+// an accompanying EvalState which can be used to inspect whether additional
+// data might be necessary to complete the evaluation.
+type Interpretable interface {
+ // ID value corresponding to the expression node.
+ ID() int64
+
+ // Eval an Activation to produce an output.
+ Eval(activation Activation) ref.Val
+}
+
+// InterpretableConst interface for tracking whether the Interpretable is a constant value.
+type InterpretableConst interface {
+ Interpretable
+
+ // Value returns the constant value of the instruction.
+ Value() ref.Val
+}
+
+// InterpretableAttribute interface for tracking whether the Interpretable is an attribute.
+type InterpretableAttribute interface {
+ Interpretable
+
+ // Attr returns the Attribute value.
+ Attr() Attribute
+
+ // Adapter returns the type adapter to be used for adapting resolved Attribute values.
+ Adapter() types.Adapter
+
+ // AddQualifier proxies the Attribute.AddQualifier method.
+ //
+ // Note, this method may mutate the current attribute state. If the desire is to clone the
+ // Attribute, the Attribute should first be copied before adding the qualifier. Attributes
+	// are not copyable by default, so this is a capability that would need to be added to the
+ // AttributeFactory or specifically to the underlying Attribute implementation.
+ AddQualifier(Qualifier) (Attribute, error)
+
+ // Qualify replicates the Attribute.Qualify method to permit extension and interception
+ // of object qualification.
+ Qualify(vars Activation, obj any) (any, error)
+
+ // QualifyIfPresent qualifies the object if the qualifier is declared or defined on the object.
+ // The 'presenceOnly' flag indicates that the value is not necessary, just a boolean status as
+ // to whether the qualifier is present.
+ QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error)
+
+ // IsOptional indicates whether the resulting value is an optional type.
+ IsOptional() bool
+
+ // Resolve returns the value of the Attribute given the current Activation.
+ Resolve(Activation) (any, error)
+}
+
+// InterpretableCall interface for inspecting Interpretable instructions related to function calls.
+type InterpretableCall interface {
+ Interpretable
+
+ // Function returns the function name as it appears in text or mangled operator name as it
+ // appears in the operators.go file.
+ Function() string
+
+ // OverloadID returns the overload id associated with the function specialization.
+ // Overload ids are stable across language boundaries and can be treated as synonymous with a
+ // unique function signature.
+ OverloadID() string
+
+ // Args returns the normalized arguments to the function overload.
+ // For receiver-style functions, the receiver target is arg 0.
+ Args() []Interpretable
+}
+
+// InterpretableConstructor interface for inspecting Interpretable instructions that initialize a list, map
+// or struct.
+type InterpretableConstructor interface {
+ Interpretable
+
+ // InitVals returns all the list elements, map key and values or struct field values.
+ InitVals() []Interpretable
+
+ // Type returns the type constructed.
+ Type() ref.Type
+}
+
+// Core Interpretable implementations used during the program planning phase.
+
+type evalTestOnly struct {
+ id int64
+ InterpretableAttribute
+}
+
+// ID implements the Interpretable interface method.
+func (test *evalTestOnly) ID() int64 {
+ return test.id
+}
+
+// Eval implements the Interpretable interface method.
+func (test *evalTestOnly) Eval(ctx Activation) ref.Val {
+ val, err := test.Resolve(ctx)
+ // Return an error if the resolve step fails
+ if err != nil {
+ return types.LabelErrNode(test.id, types.WrapErr(err))
+ }
+ if optVal, isOpt := val.(*types.Optional); isOpt {
+ return types.Bool(optVal.HasValue())
+ }
+ return test.Adapter().NativeToValue(val)
+}
+
+// AddQualifier appends a qualifier that will always and only perform a presence test.
+func (test *evalTestOnly) AddQualifier(q Qualifier) (Attribute, error) {
+ cq, ok := q.(ConstantQualifier)
+ if !ok {
+ return nil, fmt.Errorf("test only expressions must have constant qualifiers: %v", q)
+ }
+ return test.InterpretableAttribute.AddQualifier(&testOnlyQualifier{ConstantQualifier: cq})
+}
+
+type testOnlyQualifier struct {
+ ConstantQualifier
+}
+
+// Qualify determines whether the test-only qualifier is present on the input object.
+func (q *testOnlyQualifier) Qualify(vars Activation, obj any) (any, error) {
+ out, present, err := q.ConstantQualifier.QualifyIfPresent(vars, obj, true)
+ if err != nil {
+ return nil, err
+ }
+ if unk, isUnk := out.(types.Unknown); isUnk {
+ return unk, nil
+ }
+ if opt, isOpt := out.(types.Optional); isOpt {
+ return opt.HasValue(), nil
+ }
+ return present, nil
+}
+
+// QualifyIfPresent returns whether the target field in the test-only expression is present.
+func (q *testOnlyQualifier) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+ // Only ever test for presence.
+ return q.ConstantQualifier.QualifyIfPresent(vars, obj, true)
+}
+
+// QualifierValueEquals determines whether the test-only constant qualifier equals the input value.
+func (q *testOnlyQualifier) QualifierValueEquals(value any) bool {
+ // The input qualifier will always be of type string
+ return q.ConstantQualifier.Value().Value() == value
+}
+
+// NewConstValue creates a new constant valued Interpretable.
+func NewConstValue(id int64, val ref.Val) InterpretableConst {
+ return &evalConst{
+ id: id,
+ val: val,
+ }
+}
+
+type evalConst struct {
+ id int64
+ val ref.Val
+}
+
+// ID implements the Interpretable interface method.
+func (cons *evalConst) ID() int64 {
+ return cons.id
+}
+
+// Eval implements the Interpretable interface method.
+func (cons *evalConst) Eval(ctx Activation) ref.Val {
+ return cons.val
+}
+
+// Value implements the InterpretableConst interface method.
+func (cons *evalConst) Value() ref.Val {
+ return cons.val
+}
+
+type evalOr struct {
+ id int64
+ terms []Interpretable
+}
+
+// ID implements the Interpretable interface method.
+func (or *evalOr) ID() int64 {
+ return or.id
+}
+
+// Eval implements the Interpretable interface method.
+func (or *evalOr) Eval(ctx Activation) ref.Val {
+ var err ref.Val = nil
+ var unk *types.Unknown
+ for _, term := range or.terms {
+ val := term.Eval(ctx)
+ boolVal, ok := val.(types.Bool)
+ // short-circuit on true.
+ if ok && boolVal == types.True {
+ return types.True
+ }
+ if !ok {
+ isUnk := false
+ unk, isUnk = types.MaybeMergeUnknowns(val, unk)
+ if !isUnk && err == nil {
+ if types.IsError(val) {
+ err = val
+ } else {
+ err = types.MaybeNoSuchOverloadErr(val)
+ }
+ err = types.LabelErrNode(or.id, err)
+ }
+ }
+ }
+ if unk != nil {
+ return unk
+ }
+ if err != nil {
+ return err
+ }
+ return types.False
+}
+
+type evalAnd struct {
+ id int64
+ terms []Interpretable
+}
+
+// ID implements the Interpretable interface method.
+func (and *evalAnd) ID() int64 {
+ return and.id
+}
+
+// Eval implements the Interpretable interface method.
+func (and *evalAnd) Eval(ctx Activation) ref.Val {
+ var err ref.Val = nil
+ var unk *types.Unknown
+ for _, term := range and.terms {
+ val := term.Eval(ctx)
+ boolVal, ok := val.(types.Bool)
+ // short-circuit on false.
+ if ok && boolVal == types.False {
+ return types.False
+ }
+ if !ok {
+ isUnk := false
+ unk, isUnk = types.MaybeMergeUnknowns(val, unk)
+ if !isUnk && err == nil {
+ if types.IsError(val) {
+ err = val
+ } else {
+ err = types.MaybeNoSuchOverloadErr(val)
+ }
+ err = types.LabelErrNode(and.id, err)
+ }
+ }
+ }
+ if unk != nil {
+ return unk
+ }
+ if err != nil {
+ return err
+ }
+ return types.True
+}
+
+type evalEq struct {
+ id int64
+ lhs Interpretable
+ rhs Interpretable
+}
+
+// ID implements the Interpretable interface method.
+func (eq *evalEq) ID() int64 {
+ return eq.id
+}
+
+// Eval implements the Interpretable interface method.
+func (eq *evalEq) Eval(ctx Activation) ref.Val {
+ lVal := eq.lhs.Eval(ctx)
+ rVal := eq.rhs.Eval(ctx)
+ if types.IsUnknownOrError(lVal) {
+ return lVal
+ }
+ if types.IsUnknownOrError(rVal) {
+ return rVal
+ }
+ return types.Equal(lVal, rVal)
+}
+
+// Function implements the InterpretableCall interface method.
+func (*evalEq) Function() string {
+ return operators.Equals
+}
+
+// OverloadID implements the InterpretableCall interface method.
+func (*evalEq) OverloadID() string {
+ return overloads.Equals
+}
+
+// Args implements the InterpretableCall interface method.
+func (eq *evalEq) Args() []Interpretable {
+ return []Interpretable{eq.lhs, eq.rhs}
+}
+
+type evalNe struct {
+ id int64
+ lhs Interpretable
+ rhs Interpretable
+}
+
+// ID implements the Interpretable interface method.
+func (ne *evalNe) ID() int64 {
+ return ne.id
+}
+
+// Eval implements the Interpretable interface method.
+func (ne *evalNe) Eval(ctx Activation) ref.Val {
+ lVal := ne.lhs.Eval(ctx)
+ rVal := ne.rhs.Eval(ctx)
+ if types.IsUnknownOrError(lVal) {
+ return lVal
+ }
+ if types.IsUnknownOrError(rVal) {
+ return rVal
+ }
+ return types.Bool(types.Equal(lVal, rVal) != types.True)
+}
+
+// Function implements the InterpretableCall interface method.
+func (*evalNe) Function() string {
+ return operators.NotEquals
+}
+
+// OverloadID implements the InterpretableCall interface method.
+func (*evalNe) OverloadID() string {
+ return overloads.NotEquals
+}
+
+// Args implements the InterpretableCall interface method.
+func (ne *evalNe) Args() []Interpretable {
+ return []Interpretable{ne.lhs, ne.rhs}
+}
+
+type evalZeroArity struct {
+ id int64
+ function string
+ overload string
+ impl functions.FunctionOp
+}
+
+// ID implements the Interpretable interface method.
+func (zero *evalZeroArity) ID() int64 {
+ return zero.id
+}
+
+// Eval implements the Interpretable interface method.
+func (zero *evalZeroArity) Eval(ctx Activation) ref.Val {
+ return types.LabelErrNode(zero.id, zero.impl())
+}
+
+// Function implements the InterpretableCall interface method.
+func (zero *evalZeroArity) Function() string {
+ return zero.function
+}
+
+// OverloadID implements the InterpretableCall interface method.
+func (zero *evalZeroArity) OverloadID() string {
+ return zero.overload
+}
+
+// Args returns the empty argument list of the zero-arity function.
+func (zero *evalZeroArity) Args() []Interpretable {
+ return []Interpretable{}
+}
+
+type evalUnary struct {
+ id int64
+ function string
+ overload string
+ arg Interpretable
+ trait int
+ impl functions.UnaryOp
+ nonStrict bool
+}
+
+// ID implements the Interpretable interface method.
+func (un *evalUnary) ID() int64 {
+ return un.id
+}
+
+// Eval implements the Interpretable interface method.
+func (un *evalUnary) Eval(ctx Activation) ref.Val {
+ argVal := un.arg.Eval(ctx)
+ // Early return if the argument to the function is unknown or error.
+ strict := !un.nonStrict
+ if strict && types.IsUnknownOrError(argVal) {
+ return argVal
+ }
+ // If the implementation is bound and the argument value has the right traits required to
+ // invoke it, then call the implementation.
+ if un.impl != nil && (un.trait == 0 || (!strict && types.IsUnknownOrError(argVal)) || argVal.Type().HasTrait(un.trait)) {
+ return types.LabelErrNode(un.id, un.impl(argVal))
+ }
+ // Otherwise, if the argument is a ReceiverType attempt to invoke the receiver method on the
+ // operand (arg0).
+ if argVal.Type().HasTrait(traits.ReceiverType) {
+ return types.LabelErrNode(un.id, argVal.(traits.Receiver).Receive(un.function, un.overload, []ref.Val{}))
+ }
+ return types.NewErrWithNodeID(un.id, "no such overload: %s", un.function)
+}
+
+// Function implements the InterpretableCall interface method.
+func (un *evalUnary) Function() string {
+ return un.function
+}
+
+// OverloadID implements the InterpretableCall interface method.
+func (un *evalUnary) OverloadID() string {
+ return un.overload
+}
+
+// Args returns the argument to the unary function.
+func (un *evalUnary) Args() []Interpretable {
+ return []Interpretable{un.arg}
+}
+
+type evalBinary struct {
+ id int64
+ function string
+ overload string
+ lhs Interpretable
+ rhs Interpretable
+ trait int
+ impl functions.BinaryOp
+ nonStrict bool
+}
+
+// ID implements the Interpretable interface method.
+func (bin *evalBinary) ID() int64 {
+ return bin.id
+}
+
+// Eval implements the Interpretable interface method.
+func (bin *evalBinary) Eval(ctx Activation) ref.Val {
+ lVal := bin.lhs.Eval(ctx)
+ rVal := bin.rhs.Eval(ctx)
+ // Early return if any argument to the function is unknown or error.
+ strict := !bin.nonStrict
+ if strict {
+ if types.IsUnknownOrError(lVal) {
+ return lVal
+ }
+ if types.IsUnknownOrError(rVal) {
+ return rVal
+ }
+ }
+ // If the implementation is bound and the argument value has the right traits required to
+ // invoke it, then call the implementation.
+ if bin.impl != nil && (bin.trait == 0 || (!strict && types.IsUnknownOrError(lVal)) || lVal.Type().HasTrait(bin.trait)) {
+ return types.LabelErrNode(bin.id, bin.impl(lVal, rVal))
+ }
+ // Otherwise, if the argument is a ReceiverType attempt to invoke the receiver method on the
+ // operand (arg0).
+ if lVal.Type().HasTrait(traits.ReceiverType) {
+ return types.LabelErrNode(bin.id, lVal.(traits.Receiver).Receive(bin.function, bin.overload, []ref.Val{rVal}))
+ }
+ return types.NewErrWithNodeID(bin.id, "no such overload: %s", bin.function)
+}
+
+// Function implements the InterpretableCall interface method.
+func (bin *evalBinary) Function() string {
+ return bin.function
+}
+
+// OverloadID implements the InterpretableCall interface method.
+func (bin *evalBinary) OverloadID() string {
+ return bin.overload
+}
+
+// Args returns the arguments to the binary function.
+func (bin *evalBinary) Args() []Interpretable {
+ return []Interpretable{bin.lhs, bin.rhs}
+}
+
+type evalVarArgs struct {
+ id int64
+ function string
+ overload string
+ args []Interpretable
+ trait int
+ impl functions.FunctionOp
+ nonStrict bool
+}
+
+// NewCall creates a new call Interpretable.
+func NewCall(id int64, function, overload string, args []Interpretable, impl functions.FunctionOp) InterpretableCall {
+ return &evalVarArgs{
+ id: id,
+ function: function,
+ overload: overload,
+ args: args,
+ impl: impl,
+ }
+}
+
+// ID implements the Interpretable interface method.
+func (fn *evalVarArgs) ID() int64 {
+ return fn.id
+}
+
+// Eval implements the Interpretable interface method.
+func (fn *evalVarArgs) Eval(ctx Activation) ref.Val {
+ argVals := make([]ref.Val, len(fn.args))
+ // Early return if any argument to the function is unknown or error.
+ strict := !fn.nonStrict
+ for i, arg := range fn.args {
+ argVals[i] = arg.Eval(ctx)
+ if strict && types.IsUnknownOrError(argVals[i]) {
+ return argVals[i]
+ }
+ }
+ // If the implementation is bound and the argument value has the right traits required to
+ // invoke it, then call the implementation.
+ arg0 := argVals[0]
+ if fn.impl != nil && (fn.trait == 0 || (!strict && types.IsUnknownOrError(arg0)) || arg0.Type().HasTrait(fn.trait)) {
+ return types.LabelErrNode(fn.id, fn.impl(argVals...))
+ }
+ // Otherwise, if the argument is a ReceiverType attempt to invoke the receiver method on the
+ // operand (arg0).
+ if arg0.Type().HasTrait(traits.ReceiverType) {
+ return types.LabelErrNode(fn.id, arg0.(traits.Receiver).Receive(fn.function, fn.overload, argVals[1:]))
+ }
+ return types.NewErrWithNodeID(fn.id, "no such overload: %s %d", fn.function, fn.id)
+}
+
+// Function implements the InterpretableCall interface method.
+func (fn *evalVarArgs) Function() string {
+ return fn.function
+}
+
+// OverloadID implements the InterpretableCall interface method.
+func (fn *evalVarArgs) OverloadID() string {
+ return fn.overload
+}
+
+// Args returns the arguments to the variadic function.
+func (fn *evalVarArgs) Args() []Interpretable {
+ return fn.args
+}
+
+type evalList struct {
+ id int64
+ elems []Interpretable
+ optionals []bool
+ hasOptionals bool
+ adapter types.Adapter
+}
+
+// ID implements the Interpretable interface method.
+func (l *evalList) ID() int64 {
+ return l.id
+}
+
+// Eval implements the Interpretable interface method.
+func (l *evalList) Eval(ctx Activation) ref.Val {
+ elemVals := make([]ref.Val, 0, len(l.elems))
+	// If any element is unknown or error, terminate evaluation early.
+ for i, elem := range l.elems {
+ elemVal := elem.Eval(ctx)
+ if types.IsUnknownOrError(elemVal) {
+ return elemVal
+ }
+ if l.hasOptionals && l.optionals[i] {
+ optVal, ok := elemVal.(*types.Optional)
+ if !ok {
+ return types.LabelErrNode(l.id, invalidOptionalElementInit(elemVal))
+ }
+ if !optVal.HasValue() {
+ continue
+ }
+ elemVal = optVal.GetValue()
+ }
+ elemVals = append(elemVals, elemVal)
+ }
+ return l.adapter.NativeToValue(elemVals)
+}
+
+func (l *evalList) InitVals() []Interpretable {
+ return l.elems
+}
+
+func (l *evalList) Type() ref.Type {
+ return types.ListType
+}
+
+type evalMap struct {
+ id int64
+ keys []Interpretable
+ vals []Interpretable
+ optionals []bool
+ hasOptionals bool
+ adapter types.Adapter
+}
+
+// ID implements the Interpretable interface method.
+func (m *evalMap) ID() int64 {
+ return m.id
+}
+
+// Eval implements the Interpretable interface method.
+func (m *evalMap) Eval(ctx Activation) ref.Val {
+ entries := make(map[ref.Val]ref.Val)
+	// If any key or value is unknown or error, terminate evaluation early.
+ for i, key := range m.keys {
+ keyVal := key.Eval(ctx)
+ if types.IsUnknownOrError(keyVal) {
+ return keyVal
+ }
+ valVal := m.vals[i].Eval(ctx)
+ if types.IsUnknownOrError(valVal) {
+ return valVal
+ }
+ if m.hasOptionals && m.optionals[i] {
+ optVal, ok := valVal.(*types.Optional)
+ if !ok {
+ return types.LabelErrNode(m.id, invalidOptionalEntryInit(keyVal, valVal))
+ }
+ if !optVal.HasValue() {
+ delete(entries, keyVal)
+ continue
+ }
+ valVal = optVal.GetValue()
+ }
+ entries[keyVal] = valVal
+ }
+ return m.adapter.NativeToValue(entries)
+}
+
+func (m *evalMap) InitVals() []Interpretable {
+ if len(m.keys) != len(m.vals) {
+ return nil
+ }
+ result := make([]Interpretable, len(m.keys)+len(m.vals))
+ idx := 0
+ for i, k := range m.keys {
+ v := m.vals[i]
+ result[idx] = k
+ idx++
+ result[idx] = v
+ idx++
+ }
+ return result
+}
+
+func (m *evalMap) Type() ref.Type {
+ return types.MapType
+}
+
+type evalObj struct {
+ id int64
+ typeName string
+ fields []string
+ vals []Interpretable
+ optionals []bool
+ hasOptionals bool
+ provider types.Provider
+}
+
+// ID implements the Interpretable interface method.
+func (o *evalObj) ID() int64 {
+ return o.id
+}
+
+// Eval implements the Interpretable interface method.
+func (o *evalObj) Eval(ctx Activation) ref.Val {
+ fieldVals := make(map[string]ref.Val)
+	// If any field value is unknown or error, terminate evaluation early.
+ for i, field := range o.fields {
+ val := o.vals[i].Eval(ctx)
+ if types.IsUnknownOrError(val) {
+ return val
+ }
+ if o.hasOptionals && o.optionals[i] {
+ optVal, ok := val.(*types.Optional)
+ if !ok {
+ return types.LabelErrNode(o.id, invalidOptionalEntryInit(field, val))
+ }
+ if !optVal.HasValue() {
+ delete(fieldVals, field)
+ continue
+ }
+ val = optVal.GetValue()
+ }
+ fieldVals[field] = val
+ }
+ return types.LabelErrNode(o.id, o.provider.NewValue(o.typeName, fieldVals))
+}
+
+// InitVals implements the InterpretableConstructor interface method.
+func (o *evalObj) InitVals() []Interpretable {
+ return o.vals
+}
+
+// Type implements the InterpretableConstructor interface method.
+func (o *evalObj) Type() ref.Type {
+ return types.NewObjectType(o.typeName)
+}
+
+type evalFold struct {
+ id int64
+ accuVar string
+ iterVar string
+ iterVar2 string
+ iterRange Interpretable
+ accu Interpretable
+ cond Interpretable
+ step Interpretable
+ result Interpretable
+ adapter types.Adapter
+
+	// Note: an exhaustive fold will ensure that all branches are evaluated. When using
+	// mutable values, these branches will mutate the final result rather than make a
+	// throw-away computation.
+ exhaustive bool
+ interruptable bool
+}
+
+// ID implements the Interpretable interface method.
+func (fold *evalFold) ID() int64 {
+ return fold.id
+}
+
+// Eval implements the Interpretable interface method.
+func (fold *evalFold) Eval(ctx Activation) ref.Val {
+ // Initialize the folder interface
+ f := newFolder(fold, ctx)
+ defer releaseFolder(f)
+
+ foldRange := fold.iterRange.Eval(ctx)
+ if fold.iterVar2 != "" {
+ var foldable traits.Foldable
+ switch r := foldRange.(type) {
+ case traits.Mapper:
+ foldable = types.ToFoldableMap(r)
+ case traits.Lister:
+ foldable = types.ToFoldableList(r)
+ default:
+ return types.NewErrWithNodeID(fold.ID(), "unsupported comprehension range type: %T", foldRange)
+ }
+ foldable.Fold(f)
+ return f.evalResult()
+ }
+
+ if !foldRange.Type().HasTrait(traits.IterableType) {
+ return types.ValOrErr(foldRange, "got '%T', expected iterable type", foldRange)
+ }
+ iterable := foldRange.(traits.Iterable)
+ return f.foldIterable(iterable)
+}
+
+// Optional Interpretable implementations that specialize, subsume, or extend the core evaluation
+// plan via decorators.
+
+// evalSetMembership is an Interpretable implementation which tests whether an input value
+// exists within the set of map keys used to model a set.
+type evalSetMembership struct {
+ inst Interpretable
+ arg Interpretable
+ valueSet map[ref.Val]ref.Val
+}
+
+// ID implements the Interpretable interface method.
+func (e *evalSetMembership) ID() int64 {
+ return e.inst.ID()
+}
+
+// Eval implements the Interpretable interface method.
+func (e *evalSetMembership) Eval(ctx Activation) ref.Val {
+ val := e.arg.Eval(ctx)
+ if types.IsUnknownOrError(val) {
+ return val
+ }
+ if ret, found := e.valueSet[val]; found {
+ return ret
+ }
+ return types.False
+}
+
+// evalWatch is an Interpretable implementation that wraps the execution of a given
+// expression so that it may observe the computed value and send it to an observer.
+type evalWatch struct {
+ Interpretable
+ observer EvalObserver
+}
+
+// Eval implements the Interpretable interface method.
+func (e *evalWatch) Eval(ctx Activation) ref.Val {
+ val := e.Interpretable.Eval(ctx)
+ e.observer(e.ID(), e.Interpretable, val)
+ return val
+}
+
+// evalWatchAttr describes a watcher of an InterpretableAttribute Interpretable.
+//
+// Since the watcher may be selected against at a later stage in program planning, the watcher
+// must implement the InterpretableAttribute interface by proxy.
+type evalWatchAttr struct {
+ InterpretableAttribute
+ observer EvalObserver
+}
+
+// AddQualifier creates a wrapper over the incoming qualifier which observes the qualification
+// result.
+func (e *evalWatchAttr) AddQualifier(q Qualifier) (Attribute, error) {
+ switch qual := q.(type) {
+	// By default, the qualifier is either a constant or an attribute.
+	// There may be some custom cases where the qualifier is neither.
+ case ConstantQualifier:
+ // Expose a method to test whether the qualifier matches the input pattern.
+ q = &evalWatchConstQual{
+ ConstantQualifier: qual,
+ observer: e.observer,
+ adapter: e.Adapter(),
+ }
+ case *evalWatchAttr:
+ // Unwrap the evalWatchAttr since the observation will be applied during Qualify or
+ // QualifyIfPresent rather than Eval.
+ q = &evalWatchAttrQual{
+ Attribute: qual.InterpretableAttribute,
+ observer: e.observer,
+ adapter: e.Adapter(),
+ }
+ case Attribute:
+ // Expose methods which intercept the qualification prior to being applied as a qualifier.
+ // Using this interface ensures that the qualifier is converted to a constant value one
+ // time during attribute pattern matching as the method embeds the Attribute interface
+ // needed to trip the conversion to a constant.
+ q = &evalWatchAttrQual{
+ Attribute: qual,
+ observer: e.observer,
+ adapter: e.Adapter(),
+ }
+ default:
+ // This is likely a custom qualifier type.
+ q = &evalWatchQual{
+ Qualifier: qual,
+ observer: e.observer,
+ adapter: e.Adapter(),
+ }
+ }
+ _, err := e.InterpretableAttribute.AddQualifier(q)
+ return e, err
+}
+
+// Eval implements the Interpretable interface method.
+func (e *evalWatchAttr) Eval(vars Activation) ref.Val {
+ val := e.InterpretableAttribute.Eval(vars)
+ e.observer(e.ID(), e.InterpretableAttribute, val)
+ return val
+}
+
+// evalWatchConstQual observes the qualification of an object using a constant boolean, int,
+// string, or uint.
+type evalWatchConstQual struct {
+ ConstantQualifier
+ observer EvalObserver
+ adapter types.Adapter
+}
+
+// Qualify observes the qualification of an object via a constant boolean, int, string, or uint.
+func (e *evalWatchConstQual) Qualify(vars Activation, obj any) (any, error) {
+ out, err := e.ConstantQualifier.Qualify(vars, obj)
+ var val ref.Val
+ if err != nil {
+ val = types.LabelErrNode(e.ID(), types.WrapErr(err))
+ } else {
+ val = e.adapter.NativeToValue(out)
+ }
+ e.observer(e.ID(), e.ConstantQualifier, val)
+ return out, err
+}
+
+// QualifyIfPresent conditionally qualifies the variable and only records a value if one is present.
+func (e *evalWatchConstQual) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+ out, present, err := e.ConstantQualifier.QualifyIfPresent(vars, obj, presenceOnly)
+ var val ref.Val
+ if err != nil {
+ val = types.LabelErrNode(e.ID(), types.WrapErr(err))
+ } else if out != nil {
+ val = e.adapter.NativeToValue(out)
+ } else if presenceOnly {
+ val = types.Bool(present)
+ }
+ if present || presenceOnly {
+ e.observer(e.ID(), e.ConstantQualifier, val)
+ }
+ return out, present, err
+}
+
+// QualifierValueEquals tests whether the incoming value is equal to the qualifying constant.
+func (e *evalWatchConstQual) QualifierValueEquals(value any) bool {
+ qve, ok := e.ConstantQualifier.(qualifierValueEquator)
+ return ok && qve.QualifierValueEquals(value)
+}
+
+// evalWatchAttrQual observes the qualification of an object by a value computed at runtime.
+type evalWatchAttrQual struct {
+ Attribute
+ observer EvalObserver
+ adapter ref.TypeAdapter
+}
+
+// Qualify observes the qualification of an object via a value computed at runtime.
+func (e *evalWatchAttrQual) Qualify(vars Activation, obj any) (any, error) {
+ out, err := e.Attribute.Qualify(vars, obj)
+ var val ref.Val
+ if err != nil {
+ val = types.LabelErrNode(e.ID(), types.WrapErr(err))
+ } else {
+ val = e.adapter.NativeToValue(out)
+ }
+ e.observer(e.ID(), e.Attribute, val)
+ return out, err
+}
+
+// QualifyIfPresent conditionally qualifies the variable and only records a value if one is present.
+func (e *evalWatchAttrQual) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+ out, present, err := e.Attribute.QualifyIfPresent(vars, obj, presenceOnly)
+ var val ref.Val
+ if err != nil {
+ val = types.LabelErrNode(e.ID(), types.WrapErr(err))
+ } else if out != nil {
+ val = e.adapter.NativeToValue(out)
+ } else if presenceOnly {
+ val = types.Bool(present)
+ }
+ if present || presenceOnly {
+ e.observer(e.ID(), e.Attribute, val)
+ }
+ return out, present, err
+}
+
+// evalWatchQual observes the qualification of an object by a value computed at runtime.
+type evalWatchQual struct {
+ Qualifier
+ observer EvalObserver
+ adapter types.Adapter
+}
+
+// Qualify observes the qualification of an object via a value computed at runtime.
+func (e *evalWatchQual) Qualify(vars Activation, obj any) (any, error) {
+ out, err := e.Qualifier.Qualify(vars, obj)
+ var val ref.Val
+ if err != nil {
+ val = types.LabelErrNode(e.ID(), types.WrapErr(err))
+ } else {
+ val = e.adapter.NativeToValue(out)
+ }
+ e.observer(e.ID(), e.Qualifier, val)
+ return out, err
+}
+
+// QualifyIfPresent conditionally qualifies the variable and only records a value if one is present.
+func (e *evalWatchQual) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+ out, present, err := e.Qualifier.QualifyIfPresent(vars, obj, presenceOnly)
+ var val ref.Val
+ if err != nil {
+ val = types.LabelErrNode(e.ID(), types.WrapErr(err))
+ } else if out != nil {
+ val = e.adapter.NativeToValue(out)
+ } else if presenceOnly {
+ val = types.Bool(present)
+ }
+ if present || presenceOnly {
+ e.observer(e.ID(), e.Qualifier, val)
+ }
+ return out, present, err
+}
+
+// evalWatchConst describes a watcher of an instConst Interpretable.
+type evalWatchConst struct {
+ InterpretableConst
+ observer EvalObserver
+}
+
+// Eval implements the Interpretable interface method.
+func (e *evalWatchConst) Eval(vars Activation) ref.Val {
+ val := e.Value()
+ e.observer(e.ID(), e.InterpretableConst, val)
+ return val
+}
+
+// evalExhaustiveOr is just like evalOr, but does not short-circuit argument evaluation.
+type evalExhaustiveOr struct {
+ id int64
+ terms []Interpretable
+}
+
+// ID implements the Interpretable interface method.
+func (or *evalExhaustiveOr) ID() int64 {
+ return or.id
+}
+
+// Eval implements the Interpretable interface method.
+func (or *evalExhaustiveOr) Eval(ctx Activation) ref.Val {
+ var err ref.Val = nil
+ var unk *types.Unknown
+ isTrue := false
+ for _, term := range or.terms {
+ val := term.Eval(ctx)
+ boolVal, ok := val.(types.Bool)
+ // flag the result as true
+ if ok && boolVal == types.True {
+ isTrue = true
+ }
+ if !ok && !isTrue {
+ isUnk := false
+ unk, isUnk = types.MaybeMergeUnknowns(val, unk)
+ if !isUnk && err == nil {
+ if types.IsError(val) {
+ err = val
+ } else {
+ err = types.MaybeNoSuchOverloadErr(val)
+ }
+ }
+ }
+ }
+ if isTrue {
+ return types.True
+ }
+ if unk != nil {
+ return unk
+ }
+ if err != nil {
+ return err
+ }
+ return types.False
+}
+
+// evalExhaustiveAnd is just like evalAnd, but does not short-circuit argument evaluation.
+type evalExhaustiveAnd struct {
+ id int64
+ terms []Interpretable
+}
+
+// ID implements the Interpretable interface method.
+func (and *evalExhaustiveAnd) ID() int64 {
+ return and.id
+}
+
+// Eval implements the Interpretable interface method.
+func (and *evalExhaustiveAnd) Eval(ctx Activation) ref.Val {
+ var err ref.Val = nil
+ var unk *types.Unknown
+ isFalse := false
+ for _, term := range and.terms {
+ val := term.Eval(ctx)
+ boolVal, ok := val.(types.Bool)
+		// flag the result as false.
+ if ok && boolVal == types.False {
+ isFalse = true
+ }
+ if !ok && !isFalse {
+ isUnk := false
+ unk, isUnk = types.MaybeMergeUnknowns(val, unk)
+ if !isUnk && err == nil {
+ if types.IsError(val) {
+ err = val
+ } else {
+ err = types.MaybeNoSuchOverloadErr(val)
+ }
+ }
+ }
+ }
+ if isFalse {
+ return types.False
+ }
+ if unk != nil {
+ return unk
+ }
+ if err != nil {
+ return err
+ }
+ return types.True
+}
+
+// evalExhaustiveConditional is like evalConditional, but does not short-circuit argument
+// evaluation.
+type evalExhaustiveConditional struct {
+ id int64
+ adapter types.Adapter
+ attr *conditionalAttribute
+}
+
+// ID implements the Interpretable interface method.
+func (cond *evalExhaustiveConditional) ID() int64 {
+ return cond.id
+}
+
+// Eval implements the Interpretable interface method.
+func (cond *evalExhaustiveConditional) Eval(ctx Activation) ref.Val {
+ cVal := cond.attr.expr.Eval(ctx)
+ tVal, tErr := cond.attr.truthy.Resolve(ctx)
+ fVal, fErr := cond.attr.falsy.Resolve(ctx)
+ cBool, ok := cVal.(types.Bool)
+ if !ok {
+ return types.ValOrErr(cVal, "no such overload")
+ }
+ if cBool {
+ if tErr != nil {
+ return types.LabelErrNode(cond.id, types.WrapErr(tErr))
+ }
+ return cond.adapter.NativeToValue(tVal)
+ }
+ if fErr != nil {
+ return types.LabelErrNode(cond.id, types.WrapErr(fErr))
+ }
+ return cond.adapter.NativeToValue(fVal)
+}
+
+// evalAttr evaluates an Attribute value.
+type evalAttr struct {
+ adapter types.Adapter
+ attr Attribute
+ optional bool
+}
+
+var _ InterpretableAttribute = &evalAttr{}
+
+// ID of the attribute instruction.
+func (a *evalAttr) ID() int64 {
+ return a.attr.ID()
+}
+
+// AddQualifier implements the InterpretableAttribute interface method.
+func (a *evalAttr) AddQualifier(qual Qualifier) (Attribute, error) {
+ attr, err := a.attr.AddQualifier(qual)
+ a.attr = attr
+ return attr, err
+}
+
+// Attr implements the InterpretableAttribute interface method.
+func (a *evalAttr) Attr() Attribute {
+ return a.attr
+}
+
+// Adapter implements the InterpretableAttribute interface method.
+func (a *evalAttr) Adapter() types.Adapter {
+ return a.adapter
+}
+
+// Eval implements the Interpretable interface method.
+func (a *evalAttr) Eval(ctx Activation) ref.Val {
+ v, err := a.attr.Resolve(ctx)
+ if err != nil {
+ return types.LabelErrNode(a.ID(), types.WrapErr(err))
+ }
+ return a.adapter.NativeToValue(v)
+}
+
+// Qualify proxies to the Attribute's Qualify method.
+func (a *evalAttr) Qualify(ctx Activation, obj any) (any, error) {
+ return a.attr.Qualify(ctx, obj)
+}
+
+// QualifyIfPresent proxies to the Attribute's QualifyIfPresent method.
+func (a *evalAttr) QualifyIfPresent(ctx Activation, obj any, presenceOnly bool) (any, bool, error) {
+ return a.attr.QualifyIfPresent(ctx, obj, presenceOnly)
+}
+
+func (a *evalAttr) IsOptional() bool {
+ return a.optional
+}
+
+// Resolve proxies to the Attribute's Resolve method.
+func (a *evalAttr) Resolve(ctx Activation) (any, error) {
+ return a.attr.Resolve(ctx)
+}
+
+type evalWatchConstructor struct {
+ constructor InterpretableConstructor
+ observer EvalObserver
+}
+
+// InitVals implements the InterpretableConstructor InitVals function.
+func (c *evalWatchConstructor) InitVals() []Interpretable {
+ return c.constructor.InitVals()
+}
+
+// Type implements the InterpretableConstructor Type function.
+func (c *evalWatchConstructor) Type() ref.Type {
+ return c.constructor.Type()
+}
+
+// ID implements the Interpretable ID function.
+func (c *evalWatchConstructor) ID() int64 {
+ return c.constructor.ID()
+}
+
+// Eval implements the Interpretable Eval function.
+func (c *evalWatchConstructor) Eval(ctx Activation) ref.Val {
+ val := c.constructor.Eval(ctx)
+ c.observer(c.ID(), c.constructor, val)
+ return val
+}
+
+func invalidOptionalEntryInit(field any, value ref.Val) ref.Val {
+ return types.NewErr("cannot initialize optional entry '%v' from non-optional value %v", field, value)
+}
+
+func invalidOptionalElementInit(value ref.Val) ref.Val {
+ return types.NewErr("cannot initialize optional list element from non-optional value %v", value)
+}
+
+// newFolder creates or initializes a pooled folder instance.
+func newFolder(eval *evalFold, ctx Activation) *folder {
+ f := folderPool.Get().(*folder)
+ f.evalFold = eval
+ f.Activation = ctx
+ return f
+}
+
+// releaseFolder resets and releases a pooled folder instance.
+func releaseFolder(f *folder) {
+ f.reset()
+ folderPool.Put(f)
+}
+
+// folder tracks the state associated with folding a list or map with a comprehension v2 style macro.
+//
+// The folder embeds an interpreter.Activation and Interpretable evalFold value as well as implements
+// the traits.Folder interface methods.
+//
+// Instances of a folder are intended to be pooled to minimize the allocation overhead of this
+// temporary bookkeeping object. The folder supports lazy evaluation of the accumulator init
+// expression, which helps preserve evaluation-order semantics that might otherwise be disrupted
+// through the use of cel.bind or cel.@block.
+type folder struct {
+ *evalFold
+ Activation
+
+ // fold state objects.
+ accuVal ref.Val
+ iterVar1Val any
+ iterVar2Val any
+
+ // bookkeeping flags to modify Activation and fold behaviors.
+ initialized bool
+ mutableValue bool
+ interrupted bool
+ computeResult bool
+}
+
+func (f *folder) foldIterable(iterable traits.Iterable) ref.Val {
+ it := iterable.Iterator()
+ for it.HasNext() == types.True {
+ f.iterVar1Val = it.Next()
+
+ cond := f.cond.Eval(f)
+ condBool, ok := cond.(types.Bool)
+ if f.interrupted || (!f.exhaustive && ok && condBool != types.True) {
+ return f.evalResult()
+ }
+
+		// Update the accumulation value and check for eval interruption.
+ f.accuVal = f.step.Eval(f)
+ f.initialized = true
+ if f.interruptable && checkInterrupt(f.Activation) {
+ f.interrupted = true
+ return f.evalResult()
+ }
+ }
+ return f.evalResult()
+}
+
+// FoldEntry will either fold comprehension v1 style macros if iterVar2 is unset, or comprehension v2 style
+// macros if both the iterVar and iterVar2 are set to non-empty strings.
+func (f *folder) FoldEntry(key, val any) bool {
+ // Default to referencing both values.
+ f.iterVar1Val = key
+ f.iterVar2Val = val
+
+ // Terminate evaluation if evaluation is interrupted or the condition is not true and exhaustive
+ // eval is not enabled.
+ cond := f.cond.Eval(f)
+ condBool, ok := cond.(types.Bool)
+ if f.interrupted || (!f.exhaustive && ok && condBool != types.True) {
+ return false
+ }
+
+	// Update the accumulation value and check for eval interruption.
+ f.accuVal = f.step.Eval(f)
+ f.initialized = true
+ if f.interruptable && checkInterrupt(f.Activation) {
+ f.interrupted = true
+ return false
+ }
+ return true
+}
+
+// ResolveName overrides the default Activation lookup to perform lazy initialization of the accumulator
+// and specialized lookups of iteration values with consideration for whether the final result is being
+// computed and the iteration variables should be ignored.
+func (f *folder) ResolveName(name string) (any, bool) {
+ if name == f.accuVar {
+ if !f.initialized {
+ f.initialized = true
+ initVal := f.accu.Eval(f.Activation)
+ if !f.exhaustive {
+ if l, isList := initVal.(traits.Lister); isList && l.Size() == types.IntZero {
+ initVal = types.NewMutableList(f.adapter)
+ f.mutableValue = true
+ }
+ if m, isMap := initVal.(traits.Mapper); isMap && m.Size() == types.IntZero {
+ initVal = types.NewMutableMap(f.adapter, map[ref.Val]ref.Val{})
+ f.mutableValue = true
+ }
+ }
+ f.accuVal = initVal
+ }
+ return f.accuVal, true
+ }
+ if !f.computeResult {
+ if name == f.iterVar {
+ f.iterVar1Val = f.adapter.NativeToValue(f.iterVar1Val)
+ return f.iterVar1Val, true
+ }
+ if name == f.iterVar2 {
+ f.iterVar2Val = f.adapter.NativeToValue(f.iterVar2Val)
+ return f.iterVar2Val, true
+ }
+ }
+ return f.Activation.ResolveName(name)
+}
+
+// evalResult computes the final result of the fold after all entries have been folded and accumulated.
+func (f *folder) evalResult() ref.Val {
+ f.computeResult = true
+ if f.interrupted {
+ return types.NewErr("operation interrupted")
+ }
+ res := f.result.Eval(f)
+ // Convert a mutable list or map to an immutable one if the comprehension has generated a list or
+ // map as a result.
+ if !types.IsUnknownOrError(res) && f.mutableValue {
+ if _, ok := res.(traits.MutableLister); ok {
+ res = res.(traits.MutableLister).ToImmutableList()
+ }
+ if _, ok := res.(traits.MutableMapper); ok {
+ res = res.(traits.MutableMapper).ToImmutableMap()
+ }
+ }
+ return res
+}
+
+// reset clears any state associated with folder evaluation.
+func (f *folder) reset() {
+ f.evalFold = nil
+ f.Activation = nil
+ f.accuVal = nil
+ f.iterVar1Val = nil
+ f.iterVar2Val = nil
+
+ f.initialized = false
+ f.mutableValue = false
+ f.interrupted = false
+ f.computeResult = false
+}
+
+func checkInterrupt(a Activation) bool {
+ stop, found := a.ResolveName("#interrupted")
+ return found && stop == true
+}
+
+var (
+ // pool of var folders to reduce allocations during folds.
+ folderPool = &sync.Pool{
+ New: func() any {
+ return &folder{}
+ },
+ }
+)
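
NewConstValue and NewCall are the two exported constructors in this file, and together they are enough to assemble and evaluate a tiny call node by hand. A minimal sketch; the `double` function and its `double_int` overload id are hypothetical:

package main

import (
	"fmt"

	"github.com/google/cel-go/common/types"
	"github.com/google/cel-go/common/types/ref"
	"github.com/google/cel-go/interpreter"
)

func main() {
	// A constant operand and a hand-built call node around it.
	ten := interpreter.NewConstValue(1, types.Int(10))
	call := interpreter.NewCall(2, "double", "double_int",
		[]interpreter.Interpretable{ten},
		func(args ...ref.Val) ref.Val {
			i, ok := args[0].(types.Int)
			if !ok {
				return types.MaybeNoSuchOverloadErr(args[0])
			}
			return i + i
		})
	fmt.Println(call.Eval(interpreter.EmptyActivation())) // 20
}
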
diff --git a/vendor/github.com/google/cel-go/interpreter/interpreter.go b/vendor/github.com/google/cel-go/interpreter/interpreter.go
new file mode 100644
index 000000000..0aca74d88
--- /dev/null
+++ b/vendor/github.com/google/cel-go/interpreter/interpreter.go
@@ -0,0 +1,185 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package interpreter provides functions to evaluate parsed expressions with
+// the option to augment the evaluation with inputs and functions supplied at
+// evaluation time.
+package interpreter
+
+import (
+ "github.com/google/cel-go/common/ast"
+ "github.com/google/cel-go/common/containers"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+)
+
+// Interpreter generates a new Interpretable from a checked or unchecked expression.
+type Interpreter interface {
+ // NewInterpretable creates an Interpretable from a checked expression and an
+ // optional list of InterpretableDecorator values.
+ NewInterpretable(exprAST *ast.AST, decorators ...InterpretableDecorator) (Interpretable, error)
+}
+
+// EvalObserver is a functional interface that accepts an expression id and an observed value.
+// The id identifies the expression that was evaluated, the programStep is the Interpretable or Qualifier that
+// was evaluated and value is the result of the evaluation.
+type EvalObserver func(id int64, programStep any, value ref.Val)
+
+// Observe constructs a decorator that calls all the provided observers in order after evaluating each Interpretable
+// or Qualifier during program evaluation.
+func Observe(observers ...EvalObserver) InterpretableDecorator {
+ if len(observers) == 1 {
+ return decObserveEval(observers[0])
+ }
+ observeFn := func(id int64, programStep any, val ref.Val) {
+ for _, observer := range observers {
+ observer(id, programStep, val)
+ }
+ }
+ return decObserveEval(observeFn)
+}
+
+// EvalCancelledError represents a cancelled program evaluation operation.
+type EvalCancelledError struct {
+ Message string
+ // Type identifies the cause of the cancellation.
+ Cause CancellationCause
+}
+
+func (e EvalCancelledError) Error() string {
+ return e.Message
+}
+
+// CancellationCause enumerates the ways a program evaluation operation can be cancelled.
+type CancellationCause int
+
+const (
+ // ContextCancelled indicates that the operation was cancelled in response to a Golang context cancellation.
+ ContextCancelled CancellationCause = iota
+
+ // CostLimitExceeded indicates that the operation was cancelled in response to the actual cost limit being
+ // exceeded.
+ CostLimitExceeded
+)
+
+// TODO: Replace all usages of TrackState with EvalStateObserver
+
+// TrackState decorates each expression node with an observer which records the value
+// associated with the given expression id. EvalState must be provided to the decorator.
+// This decorator is not thread-safe, and the EvalState must be reset between Eval()
+// calls.
+// DEPRECATED: Please use EvalStateObserver instead. It composes gracefully with additional observers.
+func TrackState(state EvalState) InterpretableDecorator {
+ return Observe(EvalStateObserver(state))
+}
+
+// EvalStateObserver provides an observer which records the value
+// associated with the given expression id. EvalState must be provided to the observer.
+// This decorator is not thread-safe, and the EvalState must be reset between Eval()
+// calls.
+func EvalStateObserver(state EvalState) EvalObserver {
+ return func(id int64, programStep any, val ref.Val) {
+ state.SetValue(id, val)
+ }
+}
+
+// ExhaustiveEval replaces operations that short-circuit with versions that evaluate
+// expressions and couples this behavior with the TrackState() decorator to provide
+// insight into the evaluation state of the entire expression. EvalState must be
+// provided to the decorator. This decorator is not thread-safe, and the EvalState
+// must be reset between Eval() calls.
+func ExhaustiveEval() InterpretableDecorator {
+ ex := decDisableShortcircuits()
+ return func(i Interpretable) (Interpretable, error) {
+ return ex(i)
+ }
+}
+
+// InterruptableEval annotates comprehension loops with information that indicates they
+// should check the `#interrupted` state within a custom Activation.
+//
+// The custom activation is currently managed higher up in the stack within the 'cel' package
+// and should not require any custom support on behalf of callers.
+func InterruptableEval() InterpretableDecorator {
+ return decInterruptFolds()
+}
+
+// Optimize will pre-compute operations such as list and map construction and optimize
+// call arguments to set membership tests. The set of optimizations will increase over time.
+func Optimize() InterpretableDecorator {
+ return decOptimize()
+}
+
+// RegexOptimization provides a way to replace an InterpretableCall for a regex function when the
+// RegexIndex argument is a string constant. Typically, the Factory would compile the regex pattern at
+// RegexIndex and report any errors (at program creation time) and then use the compiled regex for
+// all regex function invocations.
+type RegexOptimization struct {
+ // Function is the name of the function to optimize.
+ Function string
+ // OverloadID is the ID of the overload to optimize.
+ OverloadID string
+ // RegexIndex is the index position of the regex pattern argument. Only calls to the function where this argument is
+ // a string constant will be delegated to this optimizer.
+ RegexIndex int
+ // Factory constructs a replacement InterpretableCall node that optimizes the regex function call. Factory is
+ // provided with the unoptimized regex call and the string constant at the RegexIndex argument.
+ // The Factory may compile the regex for use across all invocations of the call, return any errors and
+ // return an interpreter.NewCall with the desired regex optimized function impl.
+ Factory func(call InterpretableCall, regexPattern string) (InterpretableCall, error)
+}
+
+// CompileRegexConstants compiles regex pattern string constants at program creation time and reports any regex pattern
+// compile errors.
+func CompileRegexConstants(regexOptimizations ...*RegexOptimization) InterpretableDecorator {
+ return decRegexOptimizer(regexOptimizations...)
+}
+
+type exprInterpreter struct {
+ dispatcher Dispatcher
+ container *containers.Container
+ provider types.Provider
+ adapter types.Adapter
+ attrFactory AttributeFactory
+}
+
+// NewInterpreter builds an Interpreter from a Dispatcher and TypeProvider which will be used
+// throughout the Eval of all Interpretable instances generated from it.
+func NewInterpreter(dispatcher Dispatcher,
+ container *containers.Container,
+ provider types.Provider,
+ adapter types.Adapter,
+ attrFactory AttributeFactory) Interpreter {
+ return &exprInterpreter{
+ dispatcher: dispatcher,
+ container: container,
+ provider: provider,
+ adapter: adapter,
+ attrFactory: attrFactory}
+}
+
+// NewInterpretable implements the Interpreter interface method.
+func (i *exprInterpreter) NewInterpretable(
+ checked *ast.AST,
+ decorators ...InterpretableDecorator) (Interpretable, error) {
+ p := newPlanner(
+ i.dispatcher,
+ i.provider,
+ i.adapter,
+ i.attrFactory,
+ i.container,
+ checked,
+ decorators...)
+ return p.Plan(checked.Expr())
+}
diff --git a/vendor/github.com/google/cel-go/interpreter/optimizations.go b/vendor/github.com/google/cel-go/interpreter/optimizations.go
new file mode 100644
index 000000000..2fc87e693
--- /dev/null
+++ b/vendor/github.com/google/cel-go/interpreter/optimizations.go
@@ -0,0 +1,46 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package interpreter
+
+import (
+ "regexp"
+
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+)
+
+// MatchesRegexOptimization optimizes the 'matches' standard library function by compiling the regex pattern and
+// reporting any compilation errors at program creation time, and using the compiled regex pattern for all function
+// call invocations.
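+//
+// Usage sketch (illustrative): pass this value to CompileRegexConstants to obtain
+// an InterpretableDecorator that can be supplied when planning a program:
+//
+//	decorator := CompileRegexConstants(MatchesRegexOptimization)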
+var MatchesRegexOptimization = &RegexOptimization{
+ Function: "matches",
+ RegexIndex: 1,
+ Factory: func(call InterpretableCall, regexPattern string) (InterpretableCall, error) {
+ compiledRegex, err := regexp.Compile(regexPattern)
+ if err != nil {
+ return nil, err
+ }
+ return NewCall(call.ID(), call.Function(), call.OverloadID(), call.Args(), func(values ...ref.Val) ref.Val {
+ if len(values) != 2 {
+ return types.NoSuchOverloadErr()
+ }
+ in, ok := values[0].Value().(string)
+ if !ok {
+ return types.NoSuchOverloadErr()
+ }
+ return types.Bool(compiledRegex.MatchString(in))
+ }), nil
+ },
+}
diff --git a/vendor/github.com/google/cel-go/interpreter/planner.go b/vendor/github.com/google/cel-go/interpreter/planner.go
new file mode 100644
index 000000000..3d918ce87
--- /dev/null
+++ b/vendor/github.com/google/cel-go/interpreter/planner.go
@@ -0,0 +1,757 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package interpreter
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/google/cel-go/common/ast"
+ "github.com/google/cel-go/common/containers"
+ "github.com/google/cel-go/common/functions"
+ "github.com/google/cel-go/common/operators"
+ "github.com/google/cel-go/common/types"
+)
+
+// interpretablePlanner creates an Interpretable evaluation plan from an ast.Expr value.
+type interpretablePlanner interface {
+ // Plan generates an Interpretable value (or error) from the input ast.Expr.
+ Plan(expr ast.Expr) (Interpretable, error)
+}
+
+// newPlanner creates an interpretablePlanner which references a Dispatcher, TypeProvider,
+// TypeAdapter, Container, and CheckedExpr value. These pieces of data are used to resolve
+// functions, types, and namespaced identifiers at plan time rather than at runtime since
+// it only needs to be done once and may be semi-expensive to compute.
+func newPlanner(disp Dispatcher,
+ provider types.Provider,
+ adapter types.Adapter,
+ attrFactory AttributeFactory,
+ cont *containers.Container,
+ exprAST *ast.AST,
+ decorators ...InterpretableDecorator) interpretablePlanner {
+ return &planner{
+ disp: disp,
+ provider: provider,
+ adapter: adapter,
+ attrFactory: attrFactory,
+ container: cont,
+ refMap: exprAST.ReferenceMap(),
+ typeMap: exprAST.TypeMap(),
+ decorators: decorators,
+ }
+}
+
+// planner is an implementation of the interpretablePlanner interface.
+type planner struct {
+ disp Dispatcher
+ provider types.Provider
+ adapter types.Adapter
+ attrFactory AttributeFactory
+ container *containers.Container
+ refMap map[int64]*ast.ReferenceInfo
+ typeMap map[int64]*types.Type
+ decorators []InterpretableDecorator
+}
+
+// Plan implements the interpretablePlanner interface. This implementation of the Plan method also
+// applies decorators to each Interpretable generated as part of the overall plan. Decorators are
+// useful for layering functionality into the evaluation that is not natively understood by CEL,
+// such as state-tracking, expression re-write, and possibly efficient thread-safe memoization of
+// repeated expressions.
+func (p *planner) Plan(expr ast.Expr) (Interpretable, error) {
+ switch expr.Kind() {
+ case ast.CallKind:
+ return p.decorate(p.planCall(expr))
+ case ast.IdentKind:
+ return p.decorate(p.planIdent(expr))
+ case ast.LiteralKind:
+ return p.decorate(p.planConst(expr))
+ case ast.SelectKind:
+ return p.decorate(p.planSelect(expr))
+ case ast.ListKind:
+ return p.decorate(p.planCreateList(expr))
+ case ast.MapKind:
+ return p.decorate(p.planCreateMap(expr))
+ case ast.StructKind:
+ return p.decorate(p.planCreateStruct(expr))
+ case ast.ComprehensionKind:
+ return p.decorate(p.planComprehension(expr))
+ }
+ return nil, fmt.Errorf("unsupported expr: %v", expr)
+}
+
+// decorate applies the InterpretableDecorator functions to the given Interpretable.
+// Both the Interpretable and error generated by a Plan step are accepted as arguments
+// for convenience.
+func (p *planner) decorate(i Interpretable, err error) (Interpretable, error) {
+ if err != nil {
+ return nil, err
+ }
+ for _, dec := range p.decorators {
+ i, err = dec(i)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return i, nil
+}
+
+// planIdent creates an Interpretable that resolves an identifier from an Activation.
+func (p *planner) planIdent(expr ast.Expr) (Interpretable, error) {
+ // Establish whether the identifier is in the reference map.
+ if identRef, found := p.refMap[expr.ID()]; found {
+ return p.planCheckedIdent(expr.ID(), identRef)
+ }
+ // Create the possible attribute list for the unresolved reference.
+ ident := expr.AsIdent()
+ return &evalAttr{
+ adapter: p.adapter,
+ attr: p.attrFactory.MaybeAttribute(expr.ID(), ident),
+ }, nil
+}
+
+func (p *planner) planCheckedIdent(id int64, identRef *ast.ReferenceInfo) (Interpretable, error) {
+ // Plan a constant reference if this is the case for this simple identifier.
+ if identRef.Value != nil {
+ return NewConstValue(id, identRef.Value), nil
+ }
+
+ // Check to see whether the type map indicates this is a type name. All types should be
+ // registered with the provider.
+ cType := p.typeMap[id]
+ if cType.Kind() == types.TypeKind {
+ cVal, found := p.provider.FindIdent(identRef.Name)
+ if !found {
+ return nil, fmt.Errorf("reference to undefined type: %s", identRef.Name)
+ }
+ return NewConstValue(id, cVal), nil
+ }
+
+ // Otherwise, return the attribute for the resolved identifier name.
+ return &evalAttr{
+ adapter: p.adapter,
+ attr: p.attrFactory.AbsoluteAttribute(id, identRef.Name),
+ }, nil
+}
+
+// planSelect creates an Interpretable with either:
+//
+// a) selects a field from a map or proto.
+// b) creates a field presence test for a select within a has() macro.
+// c) resolves the select expression to a namespaced identifier.
+func (p *planner) planSelect(expr ast.Expr) (Interpretable, error) {
+ // If the Select id appears in the reference map from the CheckedExpr proto then it is either
+ // a namespaced identifier or enum value.
+ if identRef, found := p.refMap[expr.ID()]; found {
+ return p.planCheckedIdent(expr.ID(), identRef)
+ }
+
+ sel := expr.AsSelect()
+ // Plan the operand evaluation.
+ op, err := p.Plan(sel.Operand())
+ if err != nil {
+ return nil, err
+ }
+ opType := p.typeMap[sel.Operand().ID()]
+
+ // If the Select was marked TestOnly, this is a presence test.
+ //
+ // Note: presence tests are defined for structured (e.g. proto) and dynamic values (map, json)
+ // as follows:
+ // - True if the object field has a non-default value, e.g. obj.str != ""
+ // - True if the dynamic value has the field defined, e.g. key in map
+ //
+ // However, presence tests are not defined for qualified identifier names with primitive types.
+ // If a string named 'a.b.c' is declared in the environment and referenced within `has(a.b.c)`,
+ // it is not clear whether has should error or follow the convention defined for structured
+ // values.
+
+ // Establish the attribute reference.
+ attr, isAttr := op.(InterpretableAttribute)
+ if !isAttr {
+ attr, err = p.relativeAttr(op.ID(), op, false)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Build a qualifier for the attribute.
+ qual, err := p.attrFactory.NewQualifier(opType, expr.ID(), sel.FieldName(), false)
+ if err != nil {
+ return nil, err
+ }
+ // Modify the attribute to be test-only.
+ if sel.IsTestOnly() {
+ attr = &evalTestOnly{
+ id: expr.ID(),
+ InterpretableAttribute: attr,
+ }
+ }
+ // Append the qualifier on the attribute.
+ _, err = attr.AddQualifier(qual)
+ return attr, err
+}
+
+// planCall creates a callable Interpretable while specializing for common functions and invocation
+// patterns. Specifically, conditional operators &&, ||, ?:, and (in)equality functions result in
+// optimized Interpretable values.
+func (p *planner) planCall(expr ast.Expr) (Interpretable, error) {
+ call := expr.AsCall()
+ target, fnName, oName := p.resolveFunction(expr)
+ argCount := len(call.Args())
+ var offset int
+ if target != nil {
+ argCount++
+ offset++
+ }
+
+ args := make([]Interpretable, argCount)
+ if target != nil {
+ arg, err := p.Plan(target)
+ if err != nil {
+ return nil, err
+ }
+ args[0] = arg
+ }
+ for i, argExpr := range call.Args() {
+ arg, err := p.Plan(argExpr)
+ if err != nil {
+ return nil, err
+ }
+ args[i+offset] = arg
+ }
+
+ // Generate specialized Interpretable operators by function name if possible.
+ switch fnName {
+ case operators.LogicalAnd:
+ return p.planCallLogicalAnd(expr, args)
+ case operators.LogicalOr:
+ return p.planCallLogicalOr(expr, args)
+ case operators.Conditional:
+ return p.planCallConditional(expr, args)
+ case operators.Equals:
+ return p.planCallEqual(expr, args)
+ case operators.NotEquals:
+ return p.planCallNotEqual(expr, args)
+ case operators.Index:
+ return p.planCallIndex(expr, args, false)
+ case operators.OptSelect, operators.OptIndex:
+ return p.planCallIndex(expr, args, true)
+ }
+
+ // Otherwise, generate Interpretable calls specialized by argument count.
+ // Try to find the specific function by overload id.
+ var fnDef *functions.Overload
+ if oName != "" {
+ fnDef, _ = p.disp.FindOverload(oName)
+ }
+ // If the overload id couldn't resolve the function, try the simple function name.
+ if fnDef == nil {
+ fnDef, _ = p.disp.FindOverload(fnName)
+ }
+ switch argCount {
+ case 0:
+ return p.planCallZero(expr, fnName, oName, fnDef)
+ case 1:
+ // If a FunctionOp has been defined, use it, as it may exist for the purposes
+ // of dynamic dispatch within a singleton function implementation.
+ if fnDef != nil && fnDef.Unary == nil && fnDef.Function != nil {
+ return p.planCallVarArgs(expr, fnName, oName, fnDef, args)
+ }
+ return p.planCallUnary(expr, fnName, oName, fnDef, args)
+ case 2:
+ // If a FunctionOp has been defined, use it, as it may exist for the purposes
+ // of dynamic dispatch within a singleton function implementation.
+ if fnDef != nil && fnDef.Binary == nil && fnDef.Function != nil {
+ return p.planCallVarArgs(expr, fnName, oName, fnDef, args)
+ }
+ return p.planCallBinary(expr, fnName, oName, fnDef, args)
+ default:
+ return p.planCallVarArgs(expr, fnName, oName, fnDef, args)
+ }
+}
+
+// planCallZero generates a zero-arity callable Interpretable.
+func (p *planner) planCallZero(expr ast.Expr,
+ function string,
+ overload string,
+ impl *functions.Overload) (Interpretable, error) {
+ if impl == nil || impl.Function == nil {
+ return nil, fmt.Errorf("no such overload: %s()", function)
+ }
+ return &evalZeroArity{
+ id: expr.ID(),
+ function: function,
+ overload: overload,
+ impl: impl.Function,
+ }, nil
+}
+
+// planCallUnary generates a unary callable Interpretable.
+func (p *planner) planCallUnary(expr ast.Expr,
+ function string,
+ overload string,
+ impl *functions.Overload,
+ args []Interpretable) (Interpretable, error) {
+ var fn functions.UnaryOp
+ var trait int
+ var nonStrict bool
+ if impl != nil {
+ if impl.Unary == nil {
+ return nil, fmt.Errorf("no such overload: %s(arg)", function)
+ }
+ fn = impl.Unary
+ trait = impl.OperandTrait
+ nonStrict = impl.NonStrict
+ }
+ return &evalUnary{
+ id: expr.ID(),
+ function: function,
+ overload: overload,
+ arg: args[0],
+ trait: trait,
+ impl: fn,
+ nonStrict: nonStrict,
+ }, nil
+}
+
+// planCallBinary generates a binary callable Interpretable.
+func (p *planner) planCallBinary(expr ast.Expr,
+ function string,
+ overload string,
+ impl *functions.Overload,
+ args []Interpretable) (Interpretable, error) {
+ var fn functions.BinaryOp
+ var trait int
+ var nonStrict bool
+ if impl != nil {
+ if impl.Binary == nil {
+ return nil, fmt.Errorf("no such overload: %s(lhs, rhs)", function)
+ }
+ fn = impl.Binary
+ trait = impl.OperandTrait
+ nonStrict = impl.NonStrict
+ }
+ return &evalBinary{
+ id: expr.ID(),
+ function: function,
+ overload: overload,
+ lhs: args[0],
+ rhs: args[1],
+ trait: trait,
+ impl: fn,
+ nonStrict: nonStrict,
+ }, nil
+}
+
+// planCallVarArgs generates a variable argument callable Interpretable.
+func (p *planner) planCallVarArgs(expr ast.Expr,
+ function string,
+ overload string,
+ impl *functions.Overload,
+ args []Interpretable) (Interpretable, error) {
+ var fn functions.FunctionOp
+ var trait int
+ var nonStrict bool
+ if impl != nil {
+ if impl.Function == nil {
+ return nil, fmt.Errorf("no such overload: %s(...)", function)
+ }
+ fn = impl.Function
+ trait = impl.OperandTrait
+ nonStrict = impl.NonStrict
+ }
+ return &evalVarArgs{
+ id: expr.ID(),
+ function: function,
+ overload: overload,
+ args: args,
+ trait: trait,
+ impl: fn,
+ nonStrict: nonStrict,
+ }, nil
+}
+
+// planCallEqual generates an equals (==) Interpretable.
+func (p *planner) planCallEqual(expr ast.Expr, args []Interpretable) (Interpretable, error) {
+ return &evalEq{
+ id: expr.ID(),
+ lhs: args[0],
+ rhs: args[1],
+ }, nil
+}
+
+// planCallNotEqual generates a not equals (!=) Interpretable.
+func (p *planner) planCallNotEqual(expr ast.Expr, args []Interpretable) (Interpretable, error) {
+ return &evalNe{
+ id: expr.ID(),
+ lhs: args[0],
+ rhs: args[1],
+ }, nil
+}
+
+// planCallLogicalAnd generates a logical and (&&) Interpretable.
+func (p *planner) planCallLogicalAnd(expr ast.Expr, args []Interpretable) (Interpretable, error) {
+ return &evalAnd{
+ id: expr.ID(),
+ terms: args,
+ }, nil
+}
+
+// planCallLogicalOr generates a logical or (||) Interpretable.
+func (p *planner) planCallLogicalOr(expr ast.Expr, args []Interpretable) (Interpretable, error) {
+ return &evalOr{
+ id: expr.ID(),
+ terms: args,
+ }, nil
+}
+
+// planCallConditional generates a conditional / ternary (c ? t : f) Interpretable.
+func (p *planner) planCallConditional(expr ast.Expr, args []Interpretable) (Interpretable, error) {
+ cond := args[0]
+ t := args[1]
+ var tAttr Attribute
+ truthyAttr, isTruthyAttr := t.(InterpretableAttribute)
+ if isTruthyAttr {
+ tAttr = truthyAttr.Attr()
+ } else {
+ tAttr = p.attrFactory.RelativeAttribute(t.ID(), t)
+ }
+
+ f := args[2]
+ var fAttr Attribute
+ falsyAttr, isFalsyAttr := f.(InterpretableAttribute)
+ if isFalsyAttr {
+ fAttr = falsyAttr.Attr()
+ } else {
+ fAttr = p.attrFactory.RelativeAttribute(f.ID(), f)
+ }
+
+ return &evalAttr{
+ adapter: p.adapter,
+ attr: p.attrFactory.ConditionalAttribute(expr.ID(), cond, tAttr, fAttr),
+ }, nil
+}
+
+// planCallIndex either extends an attribute with the argument to the index operation, or creates
+// a relative attribute based on the return of a function call or operation.
+func (p *planner) planCallIndex(expr ast.Expr, args []Interpretable, optional bool) (Interpretable, error) {
+ op := args[0]
+ ind := args[1]
+ opType := p.typeMap[op.ID()]
+
+ // Establish the attribute reference.
+ var err error
+ attr, isAttr := op.(InterpretableAttribute)
+ if !isAttr {
+ attr, err = p.relativeAttr(op.ID(), op, false)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Construct the qualifier type.
+ var qual Qualifier
+ switch ind := ind.(type) {
+ case InterpretableConst:
+ qual, err = p.attrFactory.NewQualifier(opType, expr.ID(), ind.Value(), optional)
+ case InterpretableAttribute:
+ qual, err = p.attrFactory.NewQualifier(opType, expr.ID(), ind, optional)
+ default:
+ qual, err = p.relativeAttr(expr.ID(), ind, optional)
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ // Add the qualifier to the attribute
+ _, err = attr.AddQualifier(qual)
+ return attr, err
+}
+
+// planCreateList generates a list construction Interpretable.
+func (p *planner) planCreateList(expr ast.Expr) (Interpretable, error) {
+ list := expr.AsList()
+ optionalIndices := list.OptionalIndices()
+ elements := list.Elements()
+ optionals := make([]bool, len(elements))
+ for _, index := range optionalIndices {
+ if index < 0 || index >= int32(len(elements)) {
+ return nil, fmt.Errorf("optional index %d out of element bounds [0, %d]", index, len(elements))
+ }
+ optionals[index] = true
+ }
+ elems := make([]Interpretable, len(elements))
+ for i, elem := range elements {
+ elemVal, err := p.Plan(elem)
+ if err != nil {
+ return nil, err
+ }
+ elems[i] = elemVal
+ }
+ return &evalList{
+ id: expr.ID(),
+ elems: elems,
+ optionals: optionals,
+ hasOptionals: len(optionals) != 0,
+ adapter: p.adapter,
+ }, nil
+}
+
+// planCreateMap generates a map construction Interpretable.
+func (p *planner) planCreateMap(expr ast.Expr) (Interpretable, error) {
+ m := expr.AsMap()
+ entries := m.Entries()
+ optionals := make([]bool, len(entries))
+ keys := make([]Interpretable, len(entries))
+ vals := make([]Interpretable, len(entries))
+ for i, e := range entries {
+ entry := e.AsMapEntry()
+ keyVal, err := p.Plan(entry.Key())
+ if err != nil {
+ return nil, err
+ }
+ keys[i] = keyVal
+
+ valVal, err := p.Plan(entry.Value())
+ if err != nil {
+ return nil, err
+ }
+ vals[i] = valVal
+ optionals[i] = entry.IsOptional()
+ }
+ return &evalMap{
+ id: expr.ID(),
+ keys: keys,
+ vals: vals,
+ optionals: optionals,
+ hasOptionals: len(optionals) != 0,
+ adapter: p.adapter,
+ }, nil
+}
+
+// planCreateStruct generates an object construction Interpretable.
+func (p *planner) planCreateStruct(expr ast.Expr) (Interpretable, error) {
+ obj := expr.AsStruct()
+ typeName, defined := p.resolveTypeName(obj.TypeName())
+ if !defined {
+ return nil, fmt.Errorf("unknown type: %s", obj.TypeName())
+ }
+ objFields := obj.Fields()
+ optionals := make([]bool, len(objFields))
+ fields := make([]string, len(objFields))
+ vals := make([]Interpretable, len(objFields))
+ for i, f := range objFields {
+ field := f.AsStructField()
+ fields[i] = field.Name()
+ val, err := p.Plan(field.Value())
+ if err != nil {
+ return nil, err
+ }
+ vals[i] = val
+ optionals[i] = field.IsOptional()
+ }
+ return &evalObj{
+ id: expr.ID(),
+ typeName: typeName,
+ fields: fields,
+ vals: vals,
+ optionals: optionals,
+ hasOptionals: len(optionals) != 0,
+ provider: p.provider,
+ }, nil
+}
+
+// planComprehension generates an Interpretable fold operation.
+func (p *planner) planComprehension(expr ast.Expr) (Interpretable, error) {
+ fold := expr.AsComprehension()
+ accu, err := p.Plan(fold.AccuInit())
+ if err != nil {
+ return nil, err
+ }
+ iterRange, err := p.Plan(fold.IterRange())
+ if err != nil {
+ return nil, err
+ }
+ cond, err := p.Plan(fold.LoopCondition())
+ if err != nil {
+ return nil, err
+ }
+ step, err := p.Plan(fold.LoopStep())
+ if err != nil {
+ return nil, err
+ }
+ result, err := p.Plan(fold.Result())
+ if err != nil {
+ return nil, err
+ }
+ return &evalFold{
+ id: expr.ID(),
+ accuVar: fold.AccuVar(),
+ accu: accu,
+ iterVar: fold.IterVar(),
+ iterVar2: fold.IterVar2(),
+ iterRange: iterRange,
+ cond: cond,
+ step: step,
+ result: result,
+ adapter: p.adapter,
+ }, nil
+}
+
+// planConst generates a constant valued Interpretable.
+func (p *planner) planConst(expr ast.Expr) (Interpretable, error) {
+ return NewConstValue(expr.ID(), expr.AsLiteral()), nil
+}
+
+// resolveTypeName takes a qualified string constructed at parse time and applies the proto
+// namespace resolution rules to it, scanning over the possible matching types in the TypeProvider.
+func (p *planner) resolveTypeName(typeName string) (string, bool) {
+ for _, qualifiedTypeName := range p.container.ResolveCandidateNames(typeName) {
+ if _, found := p.provider.FindStructType(qualifiedTypeName); found {
+ return qualifiedTypeName, true
+ }
+ }
+ return "", false
+}
+
+// resolveFunction determines the call target, function name, and overload name from a given Expr
+// value.
+//
+// resolveFunction resolves ambiguities where a function may either be a receiver-style
+// invocation or a qualified global function name, treating the call as global when:
+// - The target expression consists only of ident and select expressions.
+// - The function is declared in the environment using its fully-qualified name.
+// - The fully-qualified function name matches the string-serialized target value.
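+//
+// For example (illustrative), a parsed call `a.b.c(x)` may either be a receiver-style
+// call of `c` on the target `a.b`, or an invocation of a global function declared as
+// `a.b.c`; the container and dispatcher are consulted to disambiguate the two readings.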
+func (p *planner) resolveFunction(expr ast.Expr) (ast.Expr, string, string) {
+ // Note: similar logic exists within the `checker/checker.go`. If making changes here
+ // please consider the impact on checker.go and consolidate implementations or mirror code
+ // as appropriate.
+ call := expr.AsCall()
+ var target ast.Expr = nil
+ if call.IsMemberFunction() {
+ target = call.Target()
+ }
+ fnName := call.FunctionName()
+
+ // Checked expressions always have a reference map entry, and _should_ have the fully qualified
+ // function name as the fnName value.
+ oRef, hasOverload := p.refMap[expr.ID()]
+ if hasOverload {
+ if len(oRef.OverloadIDs) == 1 {
+ return target, fnName, oRef.OverloadIDs[0]
+ }
+ // Note, this namespaced function name will not appear as a fully qualified name in ASTs
+ // built and stored before cel-go v0.5.0; however, this functionality did not work at all
+ // before the v0.5.0 release.
+ return target, fnName, ""
+ }
+
+ // Parse-only expressions need to handle the same logic as is normally performed at check time,
+ // but with potentially much less information. The only reliable source of information about
+ // which functions are configured is the dispatcher.
+ if target == nil {
+ // If the user has a parse-only expression, then it should have been configured as such in
+ // the interpreter dispatcher as it may have been omitted from the checker environment.
+ for _, qualifiedName := range p.container.ResolveCandidateNames(fnName) {
+ _, found := p.disp.FindOverload(qualifiedName)
+ if found {
+ return nil, qualifiedName, ""
+ }
+ }
+ // It's possible that the overload was not found, but this situation is accounted for in
+ // the planCall phase; however, the leading dot used for denoting fully-qualified
+ // namespaced identifiers must be stripped, as all declarations already use fully-qualified
+ // names. This stripping behavior is handled automatically by the ResolveCandidateNames
+ // call.
+ return target, stripLeadingDot(fnName), ""
+ }
+
+ // Handle the situation where the function target actually indicates a qualified function name.
+ qualifiedPrefix, maybeQualified := p.toQualifiedName(target)
+ if maybeQualified {
+ maybeQualifiedName := qualifiedPrefix + "." + fnName
+ for _, qualifiedName := range p.container.ResolveCandidateNames(maybeQualifiedName) {
+ _, found := p.disp.FindOverload(qualifiedName)
+ if found {
+ // Clear the target to ensure the proper arity is used for finding the
+ // implementation.
+ return nil, qualifiedName, ""
+ }
+ }
+ }
+// In the default case, the function is exactly as it was advertised: a receiver-style call
+// with an expression-based target and the given simple function name.
+ return target, fnName, ""
+}
+
+// relativeAttr indicates that the attribute in this case acts as a qualifier and as such needs to
+// be observed to ensure that its evaluation value is properly recorded for state tracking.
+func (p *planner) relativeAttr(id int64, eval Interpretable, opt bool) (InterpretableAttribute, error) {
+ eAttr, ok := eval.(InterpretableAttribute)
+ if !ok {
+ eAttr = &evalAttr{
+ adapter: p.adapter,
+ attr: p.attrFactory.RelativeAttribute(id, eval),
+ optional: opt,
+ }
+ }
+ // This looks like it should either decorate the new evalAttr node, or early return the InterpretableAttribute
+ decAttr, err := p.decorate(eAttr, nil)
+ if err != nil {
+ return nil, err
+ }
+ eAttr, ok = decAttr.(InterpretableAttribute)
+ if !ok {
+ return nil, fmt.Errorf("invalid attribute decoration: %v(%T)", decAttr, decAttr)
+ }
+ return eAttr, nil
+}
+
+// toQualifiedName converts an expression AST into a qualified name if possible, with a boolean
+// 'found' value that indicates if the conversion is successful.
+func (p *planner) toQualifiedName(operand ast.Expr) (string, bool) {
+ // If the type-checker identified the expression as an attribute, then it can't
+ // possibly be part of a qualified name in a namespace.
+ _, isAttr := p.refMap[operand.ID()]
+ if isAttr {
+ return "", false
+ }
+ // Since functions cannot be both namespaced and receiver functions, if the operand is not a
+ // qualified variable name, return the (possibly) qualified name built from the expressions.
+ switch operand.Kind() {
+ case ast.IdentKind:
+ id := operand.AsIdent()
+ return id, true
+ case ast.SelectKind:
+ sel := operand.AsSelect()
+ // Test only expressions are not valid as qualified names.
+ if sel.IsTestOnly() {
+ return "", false
+ }
+ if qual, found := p.toQualifiedName(sel.Operand()); found {
+ return qual + "." + sel.FieldName(), true
+ }
+ }
+ return "", false
+}
+
+func stripLeadingDot(name string) string {
+ if strings.HasPrefix(name, ".") {
+ return name[1:]
+ }
+ return name
+}
diff --git a/vendor/github.com/google/cel-go/interpreter/prune.go b/vendor/github.com/google/cel-go/interpreter/prune.go
new file mode 100644
index 000000000..410d80dc4
--- /dev/null
+++ b/vendor/github.com/google/cel-go/interpreter/prune.go
@@ -0,0 +1,543 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package interpreter
+
+import (
+ "github.com/google/cel-go/common/ast"
+ "github.com/google/cel-go/common/operators"
+ "github.com/google/cel-go/common/overloads"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+)
+
+type astPruner struct {
+ ast.ExprFactory
+ expr ast.Expr
+ macroCalls map[int64]ast.Expr
+ state EvalState
+ nextExprID int64
+}
+
+// TODO Consider having a separate walk of the AST that finds common
+// subexpressions. This can be called before or after constant folding to find
+// common subexpressions.
+
+// PruneAst prunes the given AST based on the given EvalState and generates a new AST.
+// The given AST is copied on write and a new AST is returned.
+// A couple of typical use cases for this interface would be:
+//
+// A)
+//  1. Evaluate expr with some unknowns.
+//  2. If the result is unknown:
+//     a) PruneAst
+//     b) Goto 1
+//
+// Function call results which are known would be effectively cached across
+// iterations.
+//
+// B)
+//  1. Compile the expression (maybe via a service, and maybe after checking that a
+//     compiled expression does not exist in a local cache).
+//  2. Prepare the environment and the interpreter. The Activation might be empty.
+//  3. Eval the expression. This might return an unknown, an error, or a concrete value.
+//  4. PruneAst.
+//  5. Maybe cache the expression.
+//
+// This is effectively constant folding the expression. How the environment is
+// prepared in step 2 is flexible. For example, if the caller caches the
+// compiled and constant-folded expressions but is not willing to constant
+// fold (and thus cache the results of) some external calls, then they can prepare
+// the overloads accordingly.
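+//
+// Minimal usage sketch (illustrative; `expr`, `macroCalls`, and `state` are placeholders
+// for the checked expression, its macro call metadata, and the EvalState captured during
+// a prior evaluation):
+//
+//	residual := PruneAst(expr, macroCalls, state)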
+func PruneAst(expr ast.Expr, macroCalls map[int64]ast.Expr, state EvalState) *ast.AST {
+ pruneState := NewEvalState()
+ for _, id := range state.IDs() {
+ v, _ := state.Value(id)
+ pruneState.SetValue(id, v)
+ }
+ pruner := &astPruner{
+ ExprFactory: ast.NewExprFactory(),
+ expr: expr,
+ macroCalls: macroCalls,
+ state: pruneState,
+ nextExprID: getMaxID(expr)}
+ newExpr, _ := pruner.maybePrune(expr)
+ newInfo := ast.NewSourceInfo(nil)
+ for id, call := range pruner.macroCalls {
+ newInfo.SetMacroCall(id, call)
+ }
+ return ast.NewAST(newExpr, newInfo)
+}
+
+func (p *astPruner) maybeCreateLiteral(id int64, val ref.Val) (ast.Expr, bool) {
+ switch v := val.(type) {
+ case types.Bool, types.Bytes, types.Double, types.Int, types.Null, types.String, types.Uint:
+ p.state.SetValue(id, val)
+ return p.NewLiteral(id, val), true
+ case types.Duration:
+ p.state.SetValue(id, val)
+ durationString := v.ConvertToType(types.StringType).(types.String)
+ return p.NewCall(id, overloads.TypeConvertDuration, p.NewLiteral(p.nextID(), durationString)), true
+ case types.Timestamp:
+ timestampString := v.ConvertToType(types.StringType).(types.String)
+ return p.NewCall(id, overloads.TypeConvertTimestamp, p.NewLiteral(p.nextID(), timestampString)), true
+ }
+
+ // Attempt to build a list literal.
+ if list, isList := val.(traits.Lister); isList {
+ sz := list.Size().(types.Int)
+ elemExprs := make([]ast.Expr, sz)
+ for i := types.Int(0); i < sz; i++ {
+ elem := list.Get(i)
+ if types.IsUnknownOrError(elem) {
+ return nil, false
+ }
+ elemExpr, ok := p.maybeCreateLiteral(p.nextID(), elem)
+ if !ok {
+ return nil, false
+ }
+ elemExprs[i] = elemExpr
+ }
+ p.state.SetValue(id, val)
+ return p.NewList(id, elemExprs, []int32{}), true
+ }
+
+ // Create a map literal if possible.
+ if mp, isMap := val.(traits.Mapper); isMap {
+ it := mp.Iterator()
+ entries := make([]ast.EntryExpr, mp.Size().(types.Int))
+ i := 0
+ for it.HasNext() != types.False {
+ key := it.Next()
+ val := mp.Get(key)
+ if types.IsUnknownOrError(key) || types.IsUnknownOrError(val) {
+ return nil, false
+ }
+ keyExpr, ok := p.maybeCreateLiteral(p.nextID(), key)
+ if !ok {
+ return nil, false
+ }
+ valExpr, ok := p.maybeCreateLiteral(p.nextID(), val)
+ if !ok {
+ return nil, false
+ }
+ entry := p.NewMapEntry(p.nextID(), keyExpr, valExpr, false)
+ entries[i] = entry
+ i++
+ }
+ p.state.SetValue(id, val)
+ return p.NewMap(id, entries), true
+ }
+
+ // TODO(issues/377) To construct message literals, the type provider will need to support
+ // the enumeration the fields for a given message.
+ return nil, false
+}
+
+func (p *astPruner) maybePruneOptional(elem ast.Expr) (ast.Expr, bool) {
+ elemVal, found := p.value(elem.ID())
+ if found && elemVal.Type() == types.OptionalType {
+ opt := elemVal.(*types.Optional)
+ if !opt.HasValue() {
+ return nil, true
+ }
+ if newElem, pruned := p.maybeCreateLiteral(elem.ID(), opt.GetValue()); pruned {
+ return newElem, true
+ }
+ }
+ return elem, false
+}
+
+func (p *astPruner) maybePruneIn(node ast.Expr) (ast.Expr, bool) {
+ // elem in list
+ call := node.AsCall()
+ val, exists := p.maybeValue(call.Args()[1].ID())
+ if !exists {
+ return nil, false
+ }
+ if sz, ok := val.(traits.Sizer); ok && sz.Size() == types.IntZero {
+ return p.maybeCreateLiteral(node.ID(), types.False)
+ }
+ return nil, false
+}
+
+func (p *astPruner) maybePruneLogicalNot(node ast.Expr) (ast.Expr, bool) {
+ call := node.AsCall()
+ arg := call.Args()[0]
+ val, exists := p.maybeValue(arg.ID())
+ if !exists {
+ return nil, false
+ }
+ if b, ok := val.(types.Bool); ok {
+ return p.maybeCreateLiteral(node.ID(), !b)
+ }
+ return nil, false
+}
+
+func (p *astPruner) maybePruneOr(node ast.Expr) (ast.Expr, bool) {
+ call := node.AsCall()
+ // We know result is unknown, so we have at least one unknown arg
+ // and if one side is a known value, we know we can ignore it.
+ if v, exists := p.maybeValue(call.Args()[0].ID()); exists {
+ if v == types.True {
+ return p.maybeCreateLiteral(node.ID(), types.True)
+ }
+ return call.Args()[1], true
+ }
+ if v, exists := p.maybeValue(call.Args()[1].ID()); exists {
+ if v == types.True {
+ return p.maybeCreateLiteral(node.ID(), types.True)
+ }
+ return call.Args()[0], true
+ }
+ return nil, false
+}
+
+func (p *astPruner) maybePruneAnd(node ast.Expr) (ast.Expr, bool) {
+ call := node.AsCall()
+ // We know result is unknown, so we have at least one unknown arg
+ // and if one side is a known value, we know we can ignore it.
+ if v, exists := p.maybeValue(call.Args()[0].ID()); exists {
+ if v == types.False {
+ return p.maybeCreateLiteral(node.ID(), types.False)
+ }
+ return call.Args()[1], true
+ }
+ if v, exists := p.maybeValue(call.Args()[1].ID()); exists {
+ if v == types.False {
+ return p.maybeCreateLiteral(node.ID(), types.False)
+ }
+ return call.Args()[0], true
+ }
+ return nil, false
+}
+
+func (p *astPruner) maybePruneConditional(node ast.Expr) (ast.Expr, bool) {
+ call := node.AsCall()
+ cond, exists := p.maybeValue(call.Args()[0].ID())
+ if !exists {
+ return nil, false
+ }
+ if cond.Value().(bool) {
+ return call.Args()[1], true
+ }
+ return call.Args()[2], true
+}
+
+func (p *astPruner) maybePruneFunction(node ast.Expr) (ast.Expr, bool) {
+ if _, exists := p.value(node.ID()); !exists {
+ return nil, false
+ }
+ call := node.AsCall()
+ if call.FunctionName() == operators.LogicalOr {
+ return p.maybePruneOr(node)
+ }
+ if call.FunctionName() == operators.LogicalAnd {
+ return p.maybePruneAnd(node)
+ }
+ if call.FunctionName() == operators.Conditional {
+ return p.maybePruneConditional(node)
+ }
+ if call.FunctionName() == operators.In {
+ return p.maybePruneIn(node)
+ }
+ if call.FunctionName() == operators.LogicalNot {
+ return p.maybePruneLogicalNot(node)
+ }
+ return nil, false
+}
+
+func (p *astPruner) maybePrune(node ast.Expr) (ast.Expr, bool) {
+ return p.prune(node)
+}
+
+func (p *astPruner) prune(node ast.Expr) (ast.Expr, bool) {
+ if node == nil {
+ return node, false
+ }
+ val, valueExists := p.maybeValue(node.ID())
+ if valueExists {
+ if newNode, ok := p.maybeCreateLiteral(node.ID(), val); ok {
+ delete(p.macroCalls, node.ID())
+ return newNode, true
+ }
+ }
+ if macro, found := p.macroCalls[node.ID()]; found {
+ // Ensure that intermediate values for the comprehension are cleared during pruning
+ if node.Kind() == ast.ComprehensionKind {
+ compre := node.AsComprehension()
+ visit(macro, clearIterVarVisitor(compre.IterVar(), p.state))
+ }
+ // prune the expression in terms of the macro call instead of the expanded form.
+ if newMacro, pruned := p.prune(macro); pruned {
+ p.macroCalls[node.ID()] = newMacro
+ }
+ }
+
+ // We have either an unknown/error value, or something we don't want to
+ // transform, or expression was not evaluated. If possible, drill down
+ // more.
+ switch node.Kind() {
+ case ast.SelectKind:
+ sel := node.AsSelect()
+ if operand, isPruned := p.maybePrune(sel.Operand()); isPruned {
+ if sel.IsTestOnly() {
+ return p.NewPresenceTest(node.ID(), operand, sel.FieldName()), true
+ }
+ return p.NewSelect(node.ID(), operand, sel.FieldName()), true
+ }
+ case ast.CallKind:
+ argsPruned := false
+ call := node.AsCall()
+ args := call.Args()
+ newArgs := make([]ast.Expr, len(args))
+ for i, a := range args {
+ newArgs[i] = a
+ if arg, isPruned := p.maybePrune(a); isPruned {
+ argsPruned = true
+ newArgs[i] = arg
+ }
+ }
+ if !call.IsMemberFunction() {
+ newCall := p.NewCall(node.ID(), call.FunctionName(), newArgs...)
+ if prunedCall, isPruned := p.maybePruneFunction(newCall); isPruned {
+ return prunedCall, true
+ }
+ return newCall, argsPruned
+ }
+ newTarget := call.Target()
+ targetPruned := false
+ if prunedTarget, isPruned := p.maybePrune(call.Target()); isPruned {
+ targetPruned = true
+ newTarget = prunedTarget
+ }
+ newCall := p.NewMemberCall(node.ID(), call.FunctionName(), newTarget, newArgs...)
+ if prunedCall, isPruned := p.maybePruneFunction(newCall); isPruned {
+ return prunedCall, true
+ }
+ return newCall, targetPruned || argsPruned
+ case ast.ListKind:
+ l := node.AsList()
+ elems := l.Elements()
+ optIndices := l.OptionalIndices()
+ optIndexMap := map[int32]bool{}
+ for _, i := range optIndices {
+ optIndexMap[i] = true
+ }
+ newOptIndexMap := make(map[int32]bool, len(optIndexMap))
+ newElems := make([]ast.Expr, 0, len(elems))
+ var listPruned bool
+ prunedIdx := 0
+ for i, elem := range elems {
+ _, isOpt := optIndexMap[int32(i)]
+ if isOpt {
+ newElem, pruned := p.maybePruneOptional(elem)
+ if pruned {
+ listPruned = true
+ if newElem != nil {
+ newElems = append(newElems, newElem)
+ prunedIdx++
+ }
+ continue
+ }
+ newOptIndexMap[int32(prunedIdx)] = true
+ }
+ if newElem, prunedElem := p.maybePrune(elem); prunedElem {
+ newElems = append(newElems, newElem)
+ listPruned = true
+ } else {
+ newElems = append(newElems, elem)
+ }
+ prunedIdx++
+ }
+ optIndices = make([]int32, len(newOptIndexMap))
+ idx := 0
+ for i := range newOptIndexMap {
+ optIndices[idx] = i
+ idx++
+ }
+ if listPruned {
+ return p.NewList(node.ID(), newElems, optIndices), true
+ }
+ case ast.MapKind:
+ var mapPruned bool
+ m := node.AsMap()
+ entries := m.Entries()
+ newEntries := make([]ast.EntryExpr, len(entries))
+ for i, entry := range entries {
+ newEntries[i] = entry
+ e := entry.AsMapEntry()
+ newKey, keyPruned := p.maybePrune(e.Key())
+ newValue, valuePruned := p.maybePrune(e.Value())
+ if !keyPruned && !valuePruned {
+ continue
+ }
+ mapPruned = true
+ newEntry := p.NewMapEntry(entry.ID(), newKey, newValue, e.IsOptional())
+ newEntries[i] = newEntry
+ }
+ if mapPruned {
+ return p.NewMap(node.ID(), newEntries), true
+ }
+ case ast.StructKind:
+ var structPruned bool
+ obj := node.AsStruct()
+ fields := obj.Fields()
+ newFields := make([]ast.EntryExpr, len(fields))
+ for i, field := range fields {
+ newFields[i] = field
+ f := field.AsStructField()
+ newValue, prunedValue := p.maybePrune(f.Value())
+ if !prunedValue {
+ continue
+ }
+ structPruned = true
+ newEntry := p.NewStructField(field.ID(), f.Name(), newValue, f.IsOptional())
+ newFields[i] = newEntry
+ }
+ if structPruned {
+ return p.NewStruct(node.ID(), obj.TypeName(), newFields), true
+ }
+ case ast.ComprehensionKind:
+ compre := node.AsComprehension()
+ // Only the range of the comprehension is pruned since the state tracking only records
+ // the last iteration of the comprehension and not each step in the evaluation, which
+ // means that any residuals computed in between might be inaccurate.
+ if newRange, pruned := p.maybePrune(compre.IterRange()); pruned {
+ return p.NewComprehension(
+ node.ID(),
+ newRange,
+ compre.IterVar(),
+ compre.AccuVar(),
+ compre.AccuInit(),
+ compre.LoopCondition(),
+ compre.LoopStep(),
+ compre.Result(),
+ ), true
+ }
+ }
+ return node, false
+}
+
+func (p *astPruner) value(id int64) (ref.Val, bool) {
+ val, found := p.state.Value(id)
+ return val, (found && val != nil)
+}
+
+func (p *astPruner) maybeValue(id int64) (ref.Val, bool) {
+ val, found := p.value(id)
+ if !found || types.IsUnknownOrError(val) {
+ return nil, false
+ }
+ return val, true
+}
+
+func (p *astPruner) nextID() int64 {
+ next := p.nextExprID
+ p.nextExprID++
+ return next
+}
+
+type astVisitor struct {
+ // visitExpr is called on every expr node, including those within a map/struct entry.
+ visitExpr func(expr ast.Expr)
+ // visitEntry is called before entering the key, value of a map/struct entry.
+ visitEntry func(entry ast.EntryExpr)
+}
+
+func getMaxID(expr ast.Expr) int64 {
+ maxID := int64(1)
+ visit(expr, maxIDVisitor(&maxID))
+ return maxID
+}
+
+func clearIterVarVisitor(varName string, state EvalState) astVisitor {
+ return astVisitor{
+ visitExpr: func(e ast.Expr) {
+ if e.Kind() == ast.IdentKind && e.AsIdent() == varName {
+ state.SetValue(e.ID(), nil)
+ }
+ },
+ }
+}
+
+func maxIDVisitor(maxID *int64) astVisitor {
+ return astVisitor{
+ visitExpr: func(e ast.Expr) {
+ if e.ID() >= *maxID {
+ *maxID = e.ID() + 1
+ }
+ },
+ visitEntry: func(e ast.EntryExpr) {
+ if e.ID() >= *maxID {
+ *maxID = e.ID() + 1
+ }
+ },
+ }
+}
+
+func visit(expr ast.Expr, visitor astVisitor) {
+ exprs := []ast.Expr{expr}
+ for len(exprs) != 0 {
+ e := exprs[0]
+ if visitor.visitExpr != nil {
+ visitor.visitExpr(e)
+ }
+ exprs = exprs[1:]
+ switch e.Kind() {
+ case ast.SelectKind:
+ exprs = append(exprs, e.AsSelect().Operand())
+ case ast.CallKind:
+ call := e.AsCall()
+ if call.Target() != nil {
+ exprs = append(exprs, call.Target())
+ }
+ exprs = append(exprs, call.Args()...)
+ case ast.ComprehensionKind:
+ compre := e.AsComprehension()
+ exprs = append(exprs,
+ compre.IterRange(),
+ compre.AccuInit(),
+ compre.LoopCondition(),
+ compre.LoopStep(),
+ compre.Result())
+ case ast.ListKind:
+ list := e.AsList()
+ exprs = append(exprs, list.Elements()...)
+ case ast.MapKind:
+ for _, entry := range e.AsMap().Entries() {
+ e := entry.AsMapEntry()
+ if visitor.visitEntry != nil {
+ visitor.visitEntry(entry)
+ }
+ exprs = append(exprs, e.Key())
+ exprs = append(exprs, e.Value())
+ }
+ case ast.StructKind:
+ for _, entry := range e.AsStruct().Fields() {
+ f := entry.AsStructField()
+ if visitor.visitEntry != nil {
+ visitor.visitEntry(entry)
+ }
+ exprs = append(exprs, f.Value())
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/google/cel-go/interpreter/runtimecost.go b/vendor/github.com/google/cel-go/interpreter/runtimecost.go
new file mode 100644
index 000000000..b9b307c15
--- /dev/null
+++ b/vendor/github.com/google/cel-go/interpreter/runtimecost.go
@@ -0,0 +1,316 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package interpreter
+
+import (
+ "math"
+
+ "github.com/google/cel-go/common"
+ "github.com/google/cel-go/common/overloads"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+)
+
+// WARNING: Any changes to cost calculations in this file require a corresponding change in checker/cost.go
+
+// ActualCostEstimator provides function call cost estimations at runtime.
+// CallCost returns an estimated cost for the function overload invocation with the given args, or nil if it has no
+// estimate to provide. CEL attempts to provide reasonable estimates for its standard function library, so CallCost
+// should typically not need to provide an estimate for CEL's standard functions.
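+//
+// A minimal sketch of a custom estimator (hypothetical type; charges a flat cost
+// of 1 for every call):
+//
+//	type flatCostEstimator struct{}
+//
+//	func (flatCostEstimator) CallCost(function, overloadID string, args []ref.Val, result ref.Val) *uint64 {
+//		cost := uint64(1)
+//		return &cost
+//	}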
+type ActualCostEstimator interface {
+ CallCost(function, overloadID string, args []ref.Val, result ref.Val) *uint64
+}
+
+// CostObserver provides an observer that tracks runtime cost.
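+//
+// Illustrative wiring (placeholder names; the cel package normally performs this setup):
+//
+//	tracker, _ := NewCostTracker(nil, CostTrackerLimit(1000))
+//	decorator := Observe(CostObserver(tracker))
+//	// After evaluation, tracker.ActualCost() reports the accumulated cost.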
+func CostObserver(tracker *CostTracker) EvalObserver {
+ observer := func(id int64, programStep any, val ref.Val) {
+ switch t := programStep.(type) {
+ case ConstantQualifier:
+ // TODO: Push identifiers onto the stack before observing constant qualifiers that apply to them
+ // and enable the below pop. Once enabled, this case can be collapsed into the Qualifier case.
+ tracker.cost++
+ case InterpretableConst:
+ // zero cost
+ case InterpretableAttribute:
+ switch a := t.Attr().(type) {
+ case *conditionalAttribute:
+ // Ternary has no direct cost. All cost is from the conditional and the true/false branch expressions.
+ tracker.stack.drop(a.falsy.ID(), a.truthy.ID(), a.expr.ID())
+ default:
+ tracker.stack.drop(t.Attr().ID())
+ tracker.cost += common.SelectAndIdentCost
+ }
+ if !tracker.presenceTestHasCost {
+ if _, isTestOnly := programStep.(*evalTestOnly); isTestOnly {
+ tracker.cost -= common.SelectAndIdentCost
+ }
+ }
+ case *evalExhaustiveConditional:
+ // Ternary has no direct cost. All cost is from the conditional and the true/false branch expressions.
+ tracker.stack.drop(t.attr.falsy.ID(), t.attr.truthy.ID(), t.attr.expr.ID())
+
+ // While the field names are identical, the boolean operation eval structs do not share an interface and so
+ // must be handled individually.
+ case *evalOr:
+ for _, term := range t.terms {
+ tracker.stack.drop(term.ID())
+ }
+ case *evalAnd:
+ for _, term := range t.terms {
+ tracker.stack.drop(term.ID())
+ }
+ case *evalExhaustiveOr:
+ for _, term := range t.terms {
+ tracker.stack.drop(term.ID())
+ }
+ case *evalExhaustiveAnd:
+ for _, term := range t.terms {
+ tracker.stack.drop(term.ID())
+ }
+ case *evalFold:
+ tracker.stack.drop(t.iterRange.ID())
+ case Qualifier:
+ tracker.cost++
+ case InterpretableCall:
+ if argVals, ok := tracker.stack.dropArgs(t.Args()); ok {
+ tracker.cost += tracker.costCall(t, argVals, val)
+ }
+ case InterpretableConstructor:
+ tracker.stack.dropArgs(t.InitVals())
+ switch t.Type() {
+ case types.ListType:
+ tracker.cost += common.ListCreateBaseCost
+ case types.MapType:
+ tracker.cost += common.MapCreateBaseCost
+ default:
+ tracker.cost += common.StructCreateBaseCost
+ }
+ }
+ tracker.stack.push(val, id)
+
+ if tracker.Limit != nil && tracker.cost > *tracker.Limit {
+ panic(EvalCancelledError{Cause: CostLimitExceeded, Message: "operation cancelled: actual cost limit exceeded"})
+ }
+ }
+ return observer
+}
+
+// CostTrackerOption configures the behavior of CostTracker objects.
+type CostTrackerOption func(*CostTracker) error
+
+// CostTrackerLimit sets the runtime limit on the evaluation cost during execution and will terminate the expression
+// evaluation if the limit is exceeded.
+func CostTrackerLimit(limit uint64) CostTrackerOption {
+ return func(tracker *CostTracker) error {
+ tracker.Limit = &limit
+ return nil
+ }
+}
+
+// PresenceTestHasCost determines whether presence testing has a cost of one or zero.
+// By default, a presence test has a cost of one.
+func PresenceTestHasCost(hasCost bool) CostTrackerOption {
+ return func(tracker *CostTracker) error {
+ tracker.presenceTestHasCost = hasCost
+ return nil
+ }
+}
+
+// NewCostTracker creates a new CostTracker with a given estimator and a set of functional CostTrackerOption values.
+func NewCostTracker(estimator ActualCostEstimator, opts ...CostTrackerOption) (*CostTracker, error) {
+ tracker := &CostTracker{
+ Estimator: estimator,
+ overloadTrackers: map[string]FunctionTracker{},
+ presenceTestHasCost: true,
+ }
+ for _, opt := range opts {
+ err := opt(tracker)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return tracker, nil
+}
+
+// OverloadCostTracker binds an overload ID to a runtime FunctionTracker implementation.
+//
+// OverloadCostTracker instances augment or override ActualCostEstimator decisions, allowing for versioned and/or
+// optional cost tracking changes.
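+//
+// For example (sketch), to charge a flat cost of 2 for a hypothetical overload ID:
+//
+//	opt := OverloadCostTracker("my_fn_string", func(args []ref.Val, result ref.Val) *uint64 {
+//		cost := uint64(2)
+//		return &cost
+//	})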
+func OverloadCostTracker(overloadID string, fnTracker FunctionTracker) CostTrackerOption {
+ return func(tracker *CostTracker) error {
+ tracker.overloadTrackers[overloadID] = fnTracker
+ return nil
+ }
+}
+
+// FunctionTracker computes the actual cost of evaluating the functions with the given arguments and result.
+type FunctionTracker func(args []ref.Val, result ref.Val) *uint64
+
+// CostTracker represents the information needed for tracking runtime cost.
+type CostTracker struct {
+ Estimator ActualCostEstimator
+ overloadTrackers map[string]FunctionTracker
+ Limit *uint64
+ presenceTestHasCost bool
+
+ cost uint64
+ stack refValStack
+}
+
+// ActualCost returns the runtime cost accumulated so far.
+func (c *CostTracker) ActualCost() uint64 {
+ return c.cost
+}
+
+func (c *CostTracker) costCall(call InterpretableCall, args []ref.Val, result ref.Val) uint64 {
+ var cost uint64
+ if len(c.overloadTrackers) != 0 {
+ if tracker, found := c.overloadTrackers[call.OverloadID()]; found {
+ callCost := tracker(args, result)
+ if callCost != nil {
+ cost += *callCost
+ return cost
+ }
+ }
+ }
+ if c.Estimator != nil {
+ callCost := c.Estimator.CallCost(call.Function(), call.OverloadID(), args, result)
+ if callCost != nil {
+ cost += *callCost
+ return cost
+ }
+ }
+ // If the user didn't specify an estimator, the default runtime cost calculation below is used.
+ // Users providing their own ActualCostEstimator implementation should make sure it covers the
+ // mapping between overload ID and cost calculation.
+ switch call.OverloadID() {
+ // O(n) functions
+ case overloads.StartsWithString, overloads.EndsWithString, overloads.StringToBytes, overloads.BytesToString, overloads.ExtQuoteString, overloads.ExtFormatString:
+ cost += uint64(math.Ceil(float64(c.actualSize(args[0])) * common.StringTraversalCostFactor))
+ case overloads.InList:
+ // If a list is composed entirely of constant values this is O(1), but we don't account for that here.
+ // We just assume all list containment checks are O(n).
+ cost += c.actualSize(args[1])
+ // O(min(m, n)) functions
+ case overloads.LessString, overloads.GreaterString, overloads.LessEqualsString, overloads.GreaterEqualsString,
+ overloads.LessBytes, overloads.GreaterBytes, overloads.LessEqualsBytes, overloads.GreaterEqualsBytes,
+ overloads.Equals, overloads.NotEquals:
+ // When we check the equality of 2 scalar values (e.g. 2 integers, 2 floating-point numbers, 2 booleans etc.),
+ // the CostTracker.actualSize() function by definition returns 1 for each operand, resulting in an overall cost
+ // of 1.
+ lhsSize := c.actualSize(args[0])
+ rhsSize := c.actualSize(args[1])
+ minSize := lhsSize
+ if rhsSize < minSize {
+ minSize = rhsSize
+ }
+ cost += uint64(math.Ceil(float64(minSize) * common.StringTraversalCostFactor))
+ // O(m+n) functions
+ case overloads.AddString, overloads.AddBytes:
+ // In the worst case scenario, we would need to reallocate a new backing store and copy both operands over.
+ cost += uint64(math.Ceil(float64(c.actualSize(args[0])+c.actualSize(args[1])) * common.StringTraversalCostFactor))
+ // O(nm) functions
+ case overloads.MatchesString:
+ // https://swtch.com/~rsc/regexp/regexp1.html applies to RE2 implementation supported by CEL
+ // Add one to the string length for purposes of cost calculation, to prevent the product of the
+ // string and regex costs from being 0 in the case where the string is empty but the regex is
+ // still expensive.
+ strCost := uint64(math.Ceil((1.0 + float64(c.actualSize(args[0]))) * common.StringTraversalCostFactor))
+ // We don't know how many expressions are in the regex, just the string length (a huge
+ // improvement here would be to somehow count the number of expressions in the regex, or
+ // how many states are in the regex state machine, and use that to measure regex cost).
+ // For now, we're making a guess that each expression in a regex is typically at least 4 chars
+ // in length.
+ regexCost := uint64(math.Ceil(float64(c.actualSize(args[1])) * common.RegexStringLengthCostFactor))
+ cost += strCost * regexCost
+ case overloads.ContainsString:
+ strCost := uint64(math.Ceil(float64(c.actualSize(args[0])) * common.StringTraversalCostFactor))
+ substrCost := uint64(math.Ceil(float64(c.actualSize(args[1])) * common.StringTraversalCostFactor))
+ cost += strCost * substrCost
+
+ default:
+ // The following operations are assumed to have O(1) complexity.
+ // - AddList due to the implementation. Index lookup can be O(c) where c is the
+ // number of concatenated lists, but we don't track that in cost calculations.
+ // - Conversions, since none perform a traversal of a type of unbound length.
+ // - Computing the size of strings, byte sequences, lists and maps.
+ // - Logical operations and all operators on fixed width scalars (comparisons, equality)
+ // - Any functions that don't have a declared cost either here or in provided ActualCostEstimator.
+ cost++
+
+ }
+ return cost
+}
+
+// actualSize returns the size of the given value, or 1 if the value is not a traits.Sizer.
+func (c *CostTracker) actualSize(value ref.Val) uint64 {
+ if sz, ok := value.(traits.Sizer); ok {
+ return uint64(sz.Size().(types.Int))
+ }
+ return 1
+}
+
+type stackVal struct {
+ Val ref.Val
+ ID int64
+}
+
+// refValStack keeps track of values of the stack for cost calculation purposes
+type refValStack []stackVal
+
+func (s *refValStack) push(val ref.Val, id int64) {
+ value := stackVal{Val: val, ID: id}
+ *s = append(*s, value)
+}
+
+// TODO: Allowing drop and dropArgs to remove stack items above the IDs they are provided is a workaround. drop and dropArgs
+// should find and remove only the stack items matching the provided IDs once all attributes are properly pushed and popped from stack.
+
+// drop searches the stack for each ID and removes the ID and all stack items above it.
+// If none of the IDs are found, the stack is not modified.
+// WARNING: It is possible for multiple expressions with the same ID to exist (due to how macros are implemented) so it's
+// possible that a dropped ID will remain on the stack. They should be removed when IDs on the stack are popped.
+func (s *refValStack) drop(ids ...int64) {
+ for _, id := range ids {
+ for idx := len(*s) - 1; idx >= 0; idx-- {
+ if (*s)[idx].ID == id {
+ *s = (*s)[:idx]
+ break
+ }
+ }
+ }
+}
+
+// dropArgs searches the stack for all the args by their IDs, accumulates their associated ref.Vals, and drops any
+// stack items above any of the arg IDs. If any of the IDs are not found in the stack, false is returned.
+// Args are assumed to be found in the stack in reverse order, i.e. the last arg is expected to be found highest in
+// the stack.
+// WARNING: It is possible for multiple expressions with the same ID to exist (due to how macros are implemented) so it's
+// possible that a dropped ID will remain on the stack. They should be removed when IDs on the stack are popped.
+func (s *refValStack) dropArgs(args []Interpretable) ([]ref.Val, bool) {
+ result := make([]ref.Val, len(args))
+argloop:
+ for nIdx := len(args) - 1; nIdx >= 0; nIdx-- {
+ for idx := len(*s) - 1; idx >= 0; idx-- {
+ if (*s)[idx].ID == args[nIdx].ID() {
+ el := (*s)[idx]
+ *s = (*s)[:idx]
+ result[nIdx] = el.Val
+ continue argloop
+ }
+ }
+ return nil, false
+ }
+ return result, true
+}
diff --git a/vendor/github.com/google/cel-go/parser/BUILD.bazel b/vendor/github.com/google/cel-go/parser/BUILD.bazel
new file mode 100644
index 000000000..97bc9bd43
--- /dev/null
+++ b/vendor/github.com/google/cel-go/parser/BUILD.bazel
@@ -0,0 +1,58 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(
+ licenses = ["notice"], # Apache 2.0
+)
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "errors.go",
+ "helper.go",
+ "input.go",
+ "macro.go",
+ "options.go",
+ "parser.go",
+ "unescape.go",
+ "unparser.go",
+ ],
+ importpath = "github.com/google/cel-go/parser",
+ visibility = ["//visibility:public"],
+ deps = [
+ "//common:go_default_library",
+ "//common/ast:go_default_library",
+ "//common/operators:go_default_library",
+ "//common/runes:go_default_library",
+ "//common/types:go_default_library",
+ "//common/types/ref:go_default_library",
+ "//parser/gen:go_default_library",
+ "@com_github_antlr4_go_antlr_v4//:go_default_library",
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
+ "@org_golang_google_protobuf//proto:go_default_library",
+ "@org_golang_google_protobuf//types/known/structpb:go_default_library",
+ ],
+)
+
+go_test(
+ name = "go_default_test",
+ size = "small",
+ srcs = [
+ "helper_test.go",
+ "parser_test.go",
+ "unescape_test.go",
+ "unparser_test.go",
+ ],
+ embed = [
+ ":go_default_library",
+ ],
+ deps = [
+ "//common/ast:go_default_library",
+ "//common/debug:go_default_library",
+ "//common/types:go_default_library",
+ "//parser/gen:go_default_library",
+ "//test:go_default_library",
+ "@com_github_antlr4_go_antlr_v4//:go_default_library",
+ "@org_golang_google_protobuf//proto:go_default_library",
+ "@org_golang_google_protobuf//testing/protocmp:go_default_library",
+ ],
+)
diff --git a/vendor/github.com/google/cel-go/parser/errors.go b/vendor/github.com/google/cel-go/parser/errors.go
new file mode 100644
index 000000000..93ae7a3ad
--- /dev/null
+++ b/vendor/github.com/google/cel-go/parser/errors.go
@@ -0,0 +1,43 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package parser
+
+import (
+ "fmt"
+
+ "github.com/google/cel-go/common"
+)
+
+// parseErrors is a specialization of Errors.
+type parseErrors struct {
+ errs *common.Errors
+}
+
+// errorCount returns the number of errors reported.
+func (e *parseErrors) errorCount() int {
+ return len(e.errs.GetErrors())
+}
+
+func (e *parseErrors) internalError(message string) {
+ e.errs.ReportErrorAtID(0, common.NoLocation, message)
+}
+
+func (e *parseErrors) syntaxError(l common.Location, message string) {
+ e.errs.ReportErrorAtID(0, l, fmt.Sprintf("Syntax error: %s", message))
+}
+
+func (e *parseErrors) reportErrorAtID(id int64, l common.Location, message string, args ...any) {
+ e.errs.ReportErrorAtID(id, l, message, args...)
+}
diff --git a/vendor/github.com/google/cel-go/parser/gen/BUILD.bazel b/vendor/github.com/google/cel-go/parser/gen/BUILD.bazel
new file mode 100644
index 000000000..3efed87b7
--- /dev/null
+++ b/vendor/github.com/google/cel-go/parser/gen/BUILD.bazel
@@ -0,0 +1,26 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+package(
+ default_visibility = ["//:__subpackages__"],
+ licenses = ["notice"], # Apache 2.0
+)
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "cel_base_listener.go",
+ "cel_base_visitor.go",
+ "cel_lexer.go",
+ "cel_listener.go",
+ "cel_parser.go",
+ "cel_visitor.go",
+ ],
+ data = [
+ "CEL.tokens",
+ "CELLexer.tokens",
+ ],
+ importpath = "github.com/google/cel-go/parser/gen",
+ deps = [
+ "@com_github_antlr4_go_antlr_v4//:go_default_library",
+ ],
+)
diff --git a/vendor/github.com/google/cel-go/parser/gen/CEL.g4 b/vendor/github.com/google/cel-go/parser/gen/CEL.g4
new file mode 100644
index 000000000..b011da803
--- /dev/null
+++ b/vendor/github.com/google/cel-go/parser/gen/CEL.g4
@@ -0,0 +1,200 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+grammar CEL;
+
+// Grammar Rules
+// =============
+
+start
+ : e=expr EOF
+ ;
+
+expr
+ : e=conditionalOr (op='?' e1=conditionalOr ':' e2=expr)?
+ ;
+
+conditionalOr
+ : e=conditionalAnd (ops+='||' e1+=conditionalAnd)*
+ ;
+
+conditionalAnd
+ : e=relation (ops+='&&' e1+=relation)*
+ ;
+
+relation
+ : calc
+ | relation op=('<'|'<='|'>='|'>'|'=='|'!='|'in') relation
+ ;
+
+calc
+ : unary
+ | calc op=('*'|'/'|'%') calc
+ | calc op=('+'|'-') calc
+ ;
+
+unary
+ : member # MemberExpr
+ | (ops+='!')+ member # LogicalNot
+ | (ops+='-')+ member # Negate
+ ;
+
+member
+ : primary # PrimaryExpr
+ | member op='.' (opt='?')? id=IDENTIFIER # Select
+ | member op='.' id=IDENTIFIER open='(' args=exprList? ')' # MemberCall
+ | member op='[' (opt='?')? index=expr ']' # Index
+ ;
+
+primary
+ : leadingDot='.'? id=IDENTIFIER (op='(' args=exprList? ')')? # IdentOrGlobalCall
+ | '(' e=expr ')' # Nested
+ | op='[' elems=listInit? ','? ']' # CreateList
+ | op='{' entries=mapInitializerList? ','? '}' # CreateStruct
+ | leadingDot='.'? ids+=IDENTIFIER (ops+='.' ids+=IDENTIFIER)*
+ op='{' entries=fieldInitializerList? ','? '}' # CreateMessage
+ | literal # ConstantLiteral
+ ;
+
+exprList
+ : e+=expr (',' e+=expr)*
+ ;
+
+listInit
+ : elems+=optExpr (',' elems+=optExpr)*
+ ;
+
+fieldInitializerList
+ : fields+=optField cols+=':' values+=expr (',' fields+=optField cols+=':' values+=expr)*
+ ;
+
+optField
+ : (opt='?')? IDENTIFIER
+ ;
+
+mapInitializerList
+ : keys+=optExpr cols+=':' values+=expr (',' keys+=optExpr cols+=':' values+=expr)*
+ ;
+
+optExpr
+ : (opt='?')? e=expr
+ ;
+
+literal
+ : sign=MINUS? tok=NUM_INT # Int
+ | tok=NUM_UINT # Uint
+ | sign=MINUS? tok=NUM_FLOAT # Double
+ | tok=STRING # String
+ | tok=BYTES # Bytes
+ | tok=CEL_TRUE # BoolTrue
+ | tok=CEL_FALSE # BoolFalse
+ | tok=NUL # Null
+ ;
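+
+// Illustrative note (not part of the upstream grammar): under the rules above,
+// `a && b ? c.size() : [1, 2][0]` parses as an expr whose condition is a
+// conditionalAnd, whose true branch is a MemberCall, and whose false branch
+// is an Index into a CreateList.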
+
+// Lexer Rules
+// ===========
+
+EQUALS : '==';
+NOT_EQUALS : '!=';
+IN: 'in';
+LESS : '<';
+LESS_EQUALS : '<=';
+GREATER_EQUALS : '>=';
+GREATER : '>';
+LOGICAL_AND : '&&';
+LOGICAL_OR : '||';
+
+LBRACKET : '[';
+RPRACKET : ']';
+LBRACE : '{';
+RBRACE : '}';
+LPAREN : '(';
+RPAREN : ')';
+DOT : '.';
+COMMA : ',';
+MINUS : '-';
+EXCLAM : '!';
+QUESTIONMARK : '?';
+COLON : ':';
+PLUS : '+';
+STAR : '*';
+SLASH : '/';
+PERCENT : '%';
+CEL_TRUE : 'true';
+CEL_FALSE : 'false';
+NUL : 'null';
+
+fragment BACKSLASH : '\\';
+fragment LETTER : 'A'..'Z' | 'a'..'z' ;
+fragment DIGIT : '0'..'9' ;
+fragment EXPONENT : ('e' | 'E') ( '+' | '-' )? DIGIT+ ;
+fragment HEXDIGIT : ('0'..'9'|'a'..'f'|'A'..'F') ;
+fragment RAW : 'r' | 'R';
+
+fragment ESC_SEQ
+ : ESC_CHAR_SEQ
+ | ESC_BYTE_SEQ
+ | ESC_UNI_SEQ
+ | ESC_OCT_SEQ
+ ;
+
+fragment ESC_CHAR_SEQ
+ : BACKSLASH ('a'|'b'|'f'|'n'|'r'|'t'|'v'|'"'|'\''|'\\'|'?'|'`')
+ ;
+
+fragment ESC_OCT_SEQ
+ : BACKSLASH ('0'..'3') ('0'..'7') ('0'..'7')
+ ;
+
+fragment ESC_BYTE_SEQ
+ : BACKSLASH ( 'x' | 'X' ) HEXDIGIT HEXDIGIT
+ ;
+
+fragment ESC_UNI_SEQ
+ : BACKSLASH 'u' HEXDIGIT HEXDIGIT HEXDIGIT HEXDIGIT
+ | BACKSLASH 'U' HEXDIGIT HEXDIGIT HEXDIGIT HEXDIGIT HEXDIGIT HEXDIGIT HEXDIGIT HEXDIGIT
+ ;
+
+WHITESPACE : ( '\t' | ' ' | '\r' | '\n'| '\u000C' )+ -> channel(HIDDEN) ;
+COMMENT : '//' (~'\n')* -> channel(HIDDEN) ;
+
+NUM_FLOAT
+ : ( DIGIT+ ('.' DIGIT+) EXPONENT?
+ | DIGIT+ EXPONENT
+ | '.' DIGIT+ EXPONENT?
+ )
+ ;
+
+NUM_INT
+ : ( DIGIT+ | '0x' HEXDIGIT+ );
+
+NUM_UINT
+ : DIGIT+ ( 'u' | 'U' )
+ | '0x' HEXDIGIT+ ( 'u' | 'U' )
+ ;
+
+STRING
+ : '"' (ESC_SEQ | ~('\\'|'"'|'\n'|'\r'))* '"'
+ | '\'' (ESC_SEQ | ~('\\'|'\''|'\n'|'\r'))* '\''
+ | '"""' (ESC_SEQ | ~('\\'))*? '"""'
+ | '\'\'\'' (ESC_SEQ | ~('\\'))*? '\'\'\''
+ | RAW '"' ~('"'|'\n'|'\r')* '"'
+ | RAW '\'' ~('\''|'\n'|'\r')* '\''
+ | RAW '"""' .*? '"""'
+ | RAW '\'\'\'' .*? '\'\'\''
+ ;
+
+BYTES : ('b' | 'B') STRING;
+
+IDENTIFIER : (LETTER | '_') ( LETTER | DIGIT | '_')*;
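+
+// Illustrative note (not part of the upstream grammar): per the lexer rules,
+// `123u` tokenizes as NUM_UINT, `b"abc"` as BYTES (a 'b'/'B' prefix on STRING),
+// and `r"a\nb"` as a raw STRING in which the backslash stays a literal character.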
diff --git a/vendor/github.com/google/cel-go/parser/gen/CEL.interp b/vendor/github.com/google/cel-go/parser/gen/CEL.interp
new file mode 100644
index 000000000..75b8bb3e2
--- /dev/null
+++ b/vendor/github.com/google/cel-go/parser/gen/CEL.interp
@@ -0,0 +1,99 @@
+token literal names:
+null
+'=='
+'!='
+'in'
+'<'
+'<='
+'>='
+'>'
+'&&'
+'||'
+'['
+']'
+'{'
+'}'
+'('
+')'
+'.'
+','
+'-'
+'!'
+'?'
+':'
+'+'
+'*'
+'/'
+'%'
+'true'
+'false'
+'null'
+null
+null
+null
+null
+null
+null
+null
+null
+
+token symbolic names:
+null
+EQUALS
+NOT_EQUALS
+IN
+LESS
+LESS_EQUALS
+GREATER_EQUALS
+GREATER
+LOGICAL_AND
+LOGICAL_OR
+LBRACKET
+RPRACKET
+LBRACE
+RBRACE
+LPAREN
+RPAREN
+DOT
+COMMA
+MINUS
+EXCLAM
+QUESTIONMARK
+COLON
+PLUS
+STAR
+SLASH
+PERCENT
+CEL_TRUE
+CEL_FALSE
+NUL
+WHITESPACE
+COMMENT
+NUM_FLOAT
+NUM_INT
+NUM_UINT
+STRING
+BYTES
+IDENTIFIER
+
+rule names:
+start
+expr
+conditionalOr
+conditionalAnd
+relation
+calc
+unary
+member
+primary
+exprList
+listInit
+fieldInitializerList
+optField
+mapInitializerList
+optExpr
+literal
+
+
+atn:
+[4, 1, 36, 251, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 42, 8, 1, 1, 2, 1, 2, 1, 2, 5, 2, 47, 8, 2, 10, 2, 12, 2, 50, 9, 2, 1, 3, 1, 3, 1, 3, 5, 3, 55, 8, 3, 10, 3, 12, 3, 58, 9, 3, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 5, 4, 66, 8, 4, 10, 4, 12, 4, 69, 9, 4, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 5, 5, 80, 8, 5, 10, 5, 12, 5, 83, 9, 5, 1, 6, 1, 6, 4, 6, 87, 8, 6, 11, 6, 12, 6, 88, 1, 6, 1, 6, 4, 6, 93, 8, 6, 11, 6, 12, 6, 94, 1, 6, 3, 6, 98, 8, 6, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3, 7, 106, 8, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3, 7, 114, 8, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3, 7, 120, 8, 7, 1, 7, 1, 7, 1, 7, 5, 7, 125, 8, 7, 10, 7, 12, 7, 128, 9, 7, 1, 8, 3, 8, 131, 8, 8, 1, 8, 1, 8, 1, 8, 3, 8, 136, 8, 8, 1, 8, 3, 8, 139, 8, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 3, 8, 147, 8, 8, 1, 8, 3, 8, 150, 8, 8, 1, 8, 1, 8, 1, 8, 3, 8, 155, 8, 8, 1, 8, 3, 8, 158, 8, 8, 1, 8, 1, 8, 3, 8, 162, 8, 8, 1, 8, 1, 8, 1, 8, 5, 8, 167, 8, 8, 10, 8, 12, 8, 170, 9, 8, 1, 8, 1, 8, 3, 8, 174, 8, 8, 1, 8, 3, 8, 177, 8, 8, 1, 8, 1, 8, 3, 8, 181, 8, 8, 1, 9, 1, 9, 1, 9, 5, 9, 186, 8, 9, 10, 9, 12, 9, 189, 9, 9, 1, 10, 1, 10, 1, 10, 5, 10, 194, 8, 10, 10, 10, 12, 10, 197, 9, 10, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 5, 11, 207, 8, 11, 10, 11, 12, 11, 210, 9, 11, 1, 12, 3, 12, 213, 8, 12, 1, 12, 1, 12, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 5, 13, 225, 8, 13, 10, 13, 12, 13, 228, 9, 13, 1, 14, 3, 14, 231, 8, 14, 1, 14, 1, 14, 1, 15, 3, 15, 236, 8, 15, 1, 15, 1, 15, 1, 15, 3, 15, 241, 8, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 3, 15, 249, 8, 15, 1, 15, 0, 3, 8, 10, 14, 16, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 0, 3, 1, 0, 1, 7, 1, 0, 23, 25, 2, 0, 18, 18, 22, 22, 281, 0, 32, 1, 0, 0, 0, 2, 35, 1, 0, 0, 0, 4, 43, 1, 0, 0, 0, 6, 51, 1, 0, 0, 0, 8, 59, 1, 0, 0, 0, 10, 70, 1, 0, 0, 0, 12, 97, 1, 0, 0, 0, 14, 99, 1, 0, 0, 0, 16, 180, 1, 0, 0, 0, 18, 182, 1, 0, 0, 0, 20, 190, 1, 0, 0, 0, 22, 198, 1, 0, 0, 0, 24, 212, 1, 0, 0, 0, 26, 216, 1, 0, 0, 0, 28, 230, 1, 0, 0, 0, 30, 248, 1, 0, 0, 0, 32, 33, 3, 2, 1, 0, 33, 34, 5, 0, 0, 1, 34, 1, 1, 0, 0, 0, 35, 41, 3, 4, 2, 0, 36, 37, 5, 20, 0, 0, 37, 38, 3, 4, 2, 0, 38, 39, 5, 21, 0, 0, 39, 40, 3, 2, 1, 0, 40, 42, 1, 0, 0, 0, 41, 36, 1, 0, 0, 0, 41, 42, 1, 0, 0, 0, 42, 3, 1, 0, 0, 0, 43, 48, 3, 6, 3, 0, 44, 45, 5, 9, 0, 0, 45, 47, 3, 6, 3, 0, 46, 44, 1, 0, 0, 0, 47, 50, 1, 0, 0, 0, 48, 46, 1, 0, 0, 0, 48, 49, 1, 0, 0, 0, 49, 5, 1, 0, 0, 0, 50, 48, 1, 0, 0, 0, 51, 56, 3, 8, 4, 0, 52, 53, 5, 8, 0, 0, 53, 55, 3, 8, 4, 0, 54, 52, 1, 0, 0, 0, 55, 58, 1, 0, 0, 0, 56, 54, 1, 0, 0, 0, 56, 57, 1, 0, 0, 0, 57, 7, 1, 0, 0, 0, 58, 56, 1, 0, 0, 0, 59, 60, 6, 4, -1, 0, 60, 61, 3, 10, 5, 0, 61, 67, 1, 0, 0, 0, 62, 63, 10, 1, 0, 0, 63, 64, 7, 0, 0, 0, 64, 66, 3, 8, 4, 2, 65, 62, 1, 0, 0, 0, 66, 69, 1, 0, 0, 0, 67, 65, 1, 0, 0, 0, 67, 68, 1, 0, 0, 0, 68, 9, 1, 0, 0, 0, 69, 67, 1, 0, 0, 0, 70, 71, 6, 5, -1, 0, 71, 72, 3, 12, 6, 0, 72, 81, 1, 0, 0, 0, 73, 74, 10, 2, 0, 0, 74, 75, 7, 1, 0, 0, 75, 80, 3, 10, 5, 3, 76, 77, 10, 1, 0, 0, 77, 78, 7, 2, 0, 0, 78, 80, 3, 10, 5, 2, 79, 73, 1, 0, 0, 0, 79, 76, 1, 0, 0, 0, 80, 83, 1, 0, 0, 0, 81, 79, 1, 0, 0, 0, 81, 82, 1, 0, 0, 0, 82, 11, 1, 0, 0, 0, 83, 81, 1, 0, 0, 0, 84, 98, 3, 14, 7, 0, 85, 87, 5, 19, 0, 0, 86, 85, 1, 0, 0, 0, 87, 88, 1, 0, 0, 0, 88, 86, 1, 0, 
0, 0, 88, 89, 1, 0, 0, 0, 89, 90, 1, 0, 0, 0, 90, 98, 3, 14, 7, 0, 91, 93, 5, 18, 0, 0, 92, 91, 1, 0, 0, 0, 93, 94, 1, 0, 0, 0, 94, 92, 1, 0, 0, 0, 94, 95, 1, 0, 0, 0, 95, 96, 1, 0, 0, 0, 96, 98, 3, 14, 7, 0, 97, 84, 1, 0, 0, 0, 97, 86, 1, 0, 0, 0, 97, 92, 1, 0, 0, 0, 98, 13, 1, 0, 0, 0, 99, 100, 6, 7, -1, 0, 100, 101, 3, 16, 8, 0, 101, 126, 1, 0, 0, 0, 102, 103, 10, 3, 0, 0, 103, 105, 5, 16, 0, 0, 104, 106, 5, 20, 0, 0, 105, 104, 1, 0, 0, 0, 105, 106, 1, 0, 0, 0, 106, 107, 1, 0, 0, 0, 107, 125, 5, 36, 0, 0, 108, 109, 10, 2, 0, 0, 109, 110, 5, 16, 0, 0, 110, 111, 5, 36, 0, 0, 111, 113, 5, 14, 0, 0, 112, 114, 3, 18, 9, 0, 113, 112, 1, 0, 0, 0, 113, 114, 1, 0, 0, 0, 114, 115, 1, 0, 0, 0, 115, 125, 5, 15, 0, 0, 116, 117, 10, 1, 0, 0, 117, 119, 5, 10, 0, 0, 118, 120, 5, 20, 0, 0, 119, 118, 1, 0, 0, 0, 119, 120, 1, 0, 0, 0, 120, 121, 1, 0, 0, 0, 121, 122, 3, 2, 1, 0, 122, 123, 5, 11, 0, 0, 123, 125, 1, 0, 0, 0, 124, 102, 1, 0, 0, 0, 124, 108, 1, 0, 0, 0, 124, 116, 1, 0, 0, 0, 125, 128, 1, 0, 0, 0, 126, 124, 1, 0, 0, 0, 126, 127, 1, 0, 0, 0, 127, 15, 1, 0, 0, 0, 128, 126, 1, 0, 0, 0, 129, 131, 5, 16, 0, 0, 130, 129, 1, 0, 0, 0, 130, 131, 1, 0, 0, 0, 131, 132, 1, 0, 0, 0, 132, 138, 5, 36, 0, 0, 133, 135, 5, 14, 0, 0, 134, 136, 3, 18, 9, 0, 135, 134, 1, 0, 0, 0, 135, 136, 1, 0, 0, 0, 136, 137, 1, 0, 0, 0, 137, 139, 5, 15, 0, 0, 138, 133, 1, 0, 0, 0, 138, 139, 1, 0, 0, 0, 139, 181, 1, 0, 0, 0, 140, 141, 5, 14, 0, 0, 141, 142, 3, 2, 1, 0, 142, 143, 5, 15, 0, 0, 143, 181, 1, 0, 0, 0, 144, 146, 5, 10, 0, 0, 145, 147, 3, 20, 10, 0, 146, 145, 1, 0, 0, 0, 146, 147, 1, 0, 0, 0, 147, 149, 1, 0, 0, 0, 148, 150, 5, 17, 0, 0, 149, 148, 1, 0, 0, 0, 149, 150, 1, 0, 0, 0, 150, 151, 1, 0, 0, 0, 151, 181, 5, 11, 0, 0, 152, 154, 5, 12, 0, 0, 153, 155, 3, 26, 13, 0, 154, 153, 1, 0, 0, 0, 154, 155, 1, 0, 0, 0, 155, 157, 1, 0, 0, 0, 156, 158, 5, 17, 0, 0, 157, 156, 1, 0, 0, 0, 157, 158, 1, 0, 0, 0, 158, 159, 1, 0, 0, 0, 159, 181, 5, 13, 0, 0, 160, 162, 5, 16, 0, 0, 161, 160, 1, 0, 0, 0, 161, 162, 1, 0, 0, 0, 162, 163, 1, 0, 0, 0, 163, 168, 5, 36, 0, 0, 164, 165, 5, 16, 0, 0, 165, 167, 5, 36, 0, 0, 166, 164, 1, 0, 0, 0, 167, 170, 1, 0, 0, 0, 168, 166, 1, 0, 0, 0, 168, 169, 1, 0, 0, 0, 169, 171, 1, 0, 0, 0, 170, 168, 1, 0, 0, 0, 171, 173, 5, 12, 0, 0, 172, 174, 3, 22, 11, 0, 173, 172, 1, 0, 0, 0, 173, 174, 1, 0, 0, 0, 174, 176, 1, 0, 0, 0, 175, 177, 5, 17, 0, 0, 176, 175, 1, 0, 0, 0, 176, 177, 1, 0, 0, 0, 177, 178, 1, 0, 0, 0, 178, 181, 5, 13, 0, 0, 179, 181, 3, 30, 15, 0, 180, 130, 1, 0, 0, 0, 180, 140, 1, 0, 0, 0, 180, 144, 1, 0, 0, 0, 180, 152, 1, 0, 0, 0, 180, 161, 1, 0, 0, 0, 180, 179, 1, 0, 0, 0, 181, 17, 1, 0, 0, 0, 182, 187, 3, 2, 1, 0, 183, 184, 5, 17, 0, 0, 184, 186, 3, 2, 1, 0, 185, 183, 1, 0, 0, 0, 186, 189, 1, 0, 0, 0, 187, 185, 1, 0, 0, 0, 187, 188, 1, 0, 0, 0, 188, 19, 1, 0, 0, 0, 189, 187, 1, 0, 0, 0, 190, 195, 3, 28, 14, 0, 191, 192, 5, 17, 0, 0, 192, 194, 3, 28, 14, 0, 193, 191, 1, 0, 0, 0, 194, 197, 1, 0, 0, 0, 195, 193, 1, 0, 0, 0, 195, 196, 1, 0, 0, 0, 196, 21, 1, 0, 0, 0, 197, 195, 1, 0, 0, 0, 198, 199, 3, 24, 12, 0, 199, 200, 5, 21, 0, 0, 200, 208, 3, 2, 1, 0, 201, 202, 5, 17, 0, 0, 202, 203, 3, 24, 12, 0, 203, 204, 5, 21, 0, 0, 204, 205, 3, 2, 1, 0, 205, 207, 1, 0, 0, 0, 206, 201, 1, 0, 0, 0, 207, 210, 1, 0, 0, 0, 208, 206, 1, 0, 0, 0, 208, 209, 1, 0, 0, 0, 209, 23, 1, 0, 0, 0, 210, 208, 1, 0, 0, 0, 211, 213, 5, 20, 0, 0, 212, 211, 1, 0, 0, 0, 212, 213, 1, 0, 0, 0, 213, 214, 1, 0, 0, 0, 214, 215, 5, 36, 0, 0, 215, 25, 1, 0, 0, 0, 216, 217, 3, 28, 14, 0, 217, 218, 5, 21, 0, 0, 218, 226, 3, 2, 1, 
0, 219, 220, 5, 17, 0, 0, 220, 221, 3, 28, 14, 0, 221, 222, 5, 21, 0, 0, 222, 223, 3, 2, 1, 0, 223, 225, 1, 0, 0, 0, 224, 219, 1, 0, 0, 0, 225, 228, 1, 0, 0, 0, 226, 224, 1, 0, 0, 0, 226, 227, 1, 0, 0, 0, 227, 27, 1, 0, 0, 0, 228, 226, 1, 0, 0, 0, 229, 231, 5, 20, 0, 0, 230, 229, 1, 0, 0, 0, 230, 231, 1, 0, 0, 0, 231, 232, 1, 0, 0, 0, 232, 233, 3, 2, 1, 0, 233, 29, 1, 0, 0, 0, 234, 236, 5, 18, 0, 0, 235, 234, 1, 0, 0, 0, 235, 236, 1, 0, 0, 0, 236, 237, 1, 0, 0, 0, 237, 249, 5, 32, 0, 0, 238, 249, 5, 33, 0, 0, 239, 241, 5, 18, 0, 0, 240, 239, 1, 0, 0, 0, 240, 241, 1, 0, 0, 0, 241, 242, 1, 0, 0, 0, 242, 249, 5, 31, 0, 0, 243, 249, 5, 34, 0, 0, 244, 249, 5, 35, 0, 0, 245, 249, 5, 26, 0, 0, 246, 249, 5, 27, 0, 0, 247, 249, 5, 28, 0, 0, 248, 235, 1, 0, 0, 0, 248, 238, 1, 0, 0, 0, 248, 240, 1, 0, 0, 0, 248, 243, 1, 0, 0, 0, 248, 244, 1, 0, 0, 0, 248, 245, 1, 0, 0, 0, 248, 246, 1, 0, 0, 0, 248, 247, 1, 0, 0, 0, 249, 31, 1, 0, 0, 0, 35, 41, 48, 56, 67, 79, 81, 88, 94, 97, 105, 113, 119, 124, 126, 130, 135, 138, 146, 149, 154, 157, 161, 168, 173, 176, 180, 187, 195, 208, 212, 226, 230, 235, 240, 248]
\ No newline at end of file
diff --git a/vendor/github.com/google/cel-go/parser/gen/CEL.tokens b/vendor/github.com/google/cel-go/parser/gen/CEL.tokens
new file mode 100644
index 000000000..b305bdad3
--- /dev/null
+++ b/vendor/github.com/google/cel-go/parser/gen/CEL.tokens
@@ -0,0 +1,64 @@
+EQUALS=1
+NOT_EQUALS=2
+IN=3
+LESS=4
+LESS_EQUALS=5
+GREATER_EQUALS=6
+GREATER=7
+LOGICAL_AND=8
+LOGICAL_OR=9
+LBRACKET=10
+RPRACKET=11
+LBRACE=12
+RBRACE=13
+LPAREN=14
+RPAREN=15
+DOT=16
+COMMA=17
+MINUS=18
+EXCLAM=19
+QUESTIONMARK=20
+COLON=21
+PLUS=22
+STAR=23
+SLASH=24
+PERCENT=25
+CEL_TRUE=26
+CEL_FALSE=27
+NUL=28
+WHITESPACE=29
+COMMENT=30
+NUM_FLOAT=31
+NUM_INT=32
+NUM_UINT=33
+STRING=34
+BYTES=35
+IDENTIFIER=36
+'=='=1
+'!='=2
+'in'=3
+'<'=4
+'<='=5
+'>='=6
+'>'=7
+'&&'=8
+'||'=9
+'['=10
+']'=11
+'{'=12
+'}'=13
+'('=14
+')'=15
+'.'=16
+','=17
+'-'=18
+'!'=19
+'?'=20
+':'=21
+'+'=22
+'*'=23
+'/'=24
+'%'=25
+'true'=26
+'false'=27
+'null'=28
diff --git a/vendor/github.com/google/cel-go/parser/gen/CELLexer.interp b/vendor/github.com/google/cel-go/parser/gen/CELLexer.interp
new file mode 100644
index 000000000..26e7f471e
--- /dev/null
+++ b/vendor/github.com/google/cel-go/parser/gen/CELLexer.interp
@@ -0,0 +1,136 @@
+token literal names:
+null
+'=='
+'!='
+'in'
+'<'
+'<='
+'>='
+'>'
+'&&'
+'||'
+'['
+']'
+'{'
+'}'
+'('
+')'
+'.'
+','
+'-'
+'!'
+'?'
+':'
+'+'
+'*'
+'/'
+'%'
+'true'
+'false'
+'null'
+null
+null
+null
+null
+null
+null
+null
+null
+
+token symbolic names:
+null
+EQUALS
+NOT_EQUALS
+IN
+LESS
+LESS_EQUALS
+GREATER_EQUALS
+GREATER
+LOGICAL_AND
+LOGICAL_OR
+LBRACKET
+RPRACKET
+LBRACE
+RBRACE
+LPAREN
+RPAREN
+DOT
+COMMA
+MINUS
+EXCLAM
+QUESTIONMARK
+COLON
+PLUS
+STAR
+SLASH
+PERCENT
+CEL_TRUE
+CEL_FALSE
+NUL
+WHITESPACE
+COMMENT
+NUM_FLOAT
+NUM_INT
+NUM_UINT
+STRING
+BYTES
+IDENTIFIER
+
+rule names:
+EQUALS
+NOT_EQUALS
+IN
+LESS
+LESS_EQUALS
+GREATER_EQUALS
+GREATER
+LOGICAL_AND
+LOGICAL_OR
+LBRACKET
+RPRACKET
+LBRACE
+RBRACE
+LPAREN
+RPAREN
+DOT
+COMMA
+MINUS
+EXCLAM
+QUESTIONMARK
+COLON
+PLUS
+STAR
+SLASH
+PERCENT
+CEL_TRUE
+CEL_FALSE
+NUL
+BACKSLASH
+LETTER
+DIGIT
+EXPONENT
+HEXDIGIT
+RAW
+ESC_SEQ
+ESC_CHAR_SEQ
+ESC_OCT_SEQ
+ESC_BYTE_SEQ
+ESC_UNI_SEQ
+WHITESPACE
+COMMENT
+NUM_FLOAT
+NUM_INT
+NUM_UINT
+STRING
+BYTES
+IDENTIFIER
+
+channel names:
+DEFAULT_TOKEN_CHANNEL
+HIDDEN
+
+mode names:
+DEFAULT_MODE
+
+atn:
+[4, 0, 36, 423, 6, -1, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15, 2, 16, 7, 16, 2, 17, 7, 17, 2, 18, 7, 18, 2, 19, 7, 19, 2, 20, 7, 20, 2, 21, 7, 21, 2, 22, 7, 22, 2, 23, 7, 23, 2, 24, 7, 24, 2, 25, 7, 25, 2, 26, 7, 26, 2, 27, 7, 27, 2, 28, 7, 28, 2, 29, 7, 29, 2, 30, 7, 30, 2, 31, 7, 31, 2, 32, 7, 32, 2, 33, 7, 33, 2, 34, 7, 34, 2, 35, 7, 35, 2, 36, 7, 36, 2, 37, 7, 37, 2, 38, 7, 38, 2, 39, 7, 39, 2, 40, 7, 40, 2, 41, 7, 41, 2, 42, 7, 42, 2, 43, 7, 43, 2, 44, 7, 44, 2, 45, 7, 45, 2, 46, 7, 46, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 1, 2, 1, 3, 1, 3, 1, 4, 1, 4, 1, 4, 1, 5, 1, 5, 1, 5, 1, 6, 1, 6, 1, 7, 1, 7, 1, 7, 1, 8, 1, 8, 1, 8, 1, 9, 1, 9, 1, 10, 1, 10, 1, 11, 1, 11, 1, 12, 1, 12, 1, 13, 1, 13, 1, 14, 1, 14, 1, 15, 1, 15, 1, 16, 1, 16, 1, 17, 1, 17, 1, 18, 1, 18, 1, 19, 1, 19, 1, 20, 1, 20, 1, 21, 1, 21, 1, 22, 1, 22, 1, 23, 1, 23, 1, 24, 1, 24, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 28, 1, 28, 1, 29, 1, 29, 1, 30, 1, 30, 1, 31, 1, 31, 3, 31, 177, 8, 31, 1, 31, 4, 31, 180, 8, 31, 11, 31, 12, 31, 181, 1, 32, 1, 32, 1, 33, 1, 33, 1, 34, 1, 34, 1, 34, 1, 34, 3, 34, 192, 8, 34, 1, 35, 1, 35, 1, 35, 1, 36, 1, 36, 1, 36, 1, 36, 1, 36, 1, 37, 1, 37, 1, 37, 1, 37, 1, 37, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 3, 38, 225, 8, 38, 1, 39, 4, 39, 228, 8, 39, 11, 39, 12, 39, 229, 1, 39, 1, 39, 1, 40, 1, 40, 1, 40, 1, 40, 5, 40, 238, 8, 40, 10, 40, 12, 40, 241, 9, 40, 1, 40, 1, 40, 1, 41, 4, 41, 246, 8, 41, 11, 41, 12, 41, 247, 1, 41, 1, 41, 4, 41, 252, 8, 41, 11, 41, 12, 41, 253, 1, 41, 3, 41, 257, 8, 41, 1, 41, 4, 41, 260, 8, 41, 11, 41, 12, 41, 261, 1, 41, 1, 41, 1, 41, 1, 41, 4, 41, 268, 8, 41, 11, 41, 12, 41, 269, 1, 41, 3, 41, 273, 8, 41, 3, 41, 275, 8, 41, 1, 42, 4, 42, 278, 8, 42, 11, 42, 12, 42, 279, 1, 42, 1, 42, 1, 42, 1, 42, 4, 42, 286, 8, 42, 11, 42, 12, 42, 287, 3, 42, 290, 8, 42, 1, 43, 4, 43, 293, 8, 43, 11, 43, 12, 43, 294, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 4, 43, 303, 8, 43, 11, 43, 12, 43, 304, 1, 43, 1, 43, 3, 43, 309, 8, 43, 1, 44, 1, 44, 1, 44, 5, 44, 314, 8, 44, 10, 44, 12, 44, 317, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 323, 8, 44, 10, 44, 12, 44, 326, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 335, 8, 44, 10, 44, 12, 44, 338, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 349, 8, 44, 10, 44, 12, 44, 352, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 360, 8, 44, 10, 44, 12, 44, 363, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 370, 8, 44, 10, 44, 12, 44, 373, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 383, 8, 44, 10, 44, 12, 44, 386, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 398, 8, 44, 10, 44, 12, 44, 401, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 3, 44, 407, 8, 44, 1, 45, 1, 45, 1, 45, 1, 46, 1, 46, 3, 46, 414, 8, 46, 1, 46, 1, 46, 1, 46, 5, 46, 419, 8, 46, 10, 46, 12, 46, 422, 9, 46, 4, 336, 350, 384, 399, 0, 47, 1, 1, 3, 2, 5, 3, 7, 4, 9, 5, 11, 6, 13, 7, 15, 8, 17, 9, 19, 10, 21, 11, 23, 12, 25, 13, 27, 14, 29, 15, 31, 16, 33, 17, 35, 18, 37, 19, 39, 20, 41, 21, 43, 22, 45, 23, 47, 24, 49, 25, 51, 26, 53, 27, 55, 28, 57, 0, 59, 0, 61, 0, 63, 0, 65, 0, 67, 0, 69, 0, 71, 0, 73, 0, 75, 0, 77, 0, 79, 29, 81, 30, 
83, 31, 85, 32, 87, 33, 89, 34, 91, 35, 93, 36, 1, 0, 16, 2, 0, 65, 90, 97, 122, 2, 0, 69, 69, 101, 101, 2, 0, 43, 43, 45, 45, 3, 0, 48, 57, 65, 70, 97, 102, 2, 0, 82, 82, 114, 114, 10, 0, 34, 34, 39, 39, 63, 63, 92, 92, 96, 98, 102, 102, 110, 110, 114, 114, 116, 116, 118, 118, 2, 0, 88, 88, 120, 120, 3, 0, 9, 10, 12, 13, 32, 32, 1, 0, 10, 10, 2, 0, 85, 85, 117, 117, 4, 0, 10, 10, 13, 13, 34, 34, 92, 92, 4, 0, 10, 10, 13, 13, 39, 39, 92, 92, 1, 0, 92, 92, 3, 0, 10, 10, 13, 13, 34, 34, 3, 0, 10, 10, 13, 13, 39, 39, 2, 0, 66, 66, 98, 98, 456, 0, 1, 1, 0, 0, 0, 0, 3, 1, 0, 0, 0, 0, 5, 1, 0, 0, 0, 0, 7, 1, 0, 0, 0, 0, 9, 1, 0, 0, 0, 0, 11, 1, 0, 0, 0, 0, 13, 1, 0, 0, 0, 0, 15, 1, 0, 0, 0, 0, 17, 1, 0, 0, 0, 0, 19, 1, 0, 0, 0, 0, 21, 1, 0, 0, 0, 0, 23, 1, 0, 0, 0, 0, 25, 1, 0, 0, 0, 0, 27, 1, 0, 0, 0, 0, 29, 1, 0, 0, 0, 0, 31, 1, 0, 0, 0, 0, 33, 1, 0, 0, 0, 0, 35, 1, 0, 0, 0, 0, 37, 1, 0, 0, 0, 0, 39, 1, 0, 0, 0, 0, 41, 1, 0, 0, 0, 0, 43, 1, 0, 0, 0, 0, 45, 1, 0, 0, 0, 0, 47, 1, 0, 0, 0, 0, 49, 1, 0, 0, 0, 0, 51, 1, 0, 0, 0, 0, 53, 1, 0, 0, 0, 0, 55, 1, 0, 0, 0, 0, 79, 1, 0, 0, 0, 0, 81, 1, 0, 0, 0, 0, 83, 1, 0, 0, 0, 0, 85, 1, 0, 0, 0, 0, 87, 1, 0, 0, 0, 0, 89, 1, 0, 0, 0, 0, 91, 1, 0, 0, 0, 0, 93, 1, 0, 0, 0, 1, 95, 1, 0, 0, 0, 3, 98, 1, 0, 0, 0, 5, 101, 1, 0, 0, 0, 7, 104, 1, 0, 0, 0, 9, 106, 1, 0, 0, 0, 11, 109, 1, 0, 0, 0, 13, 112, 1, 0, 0, 0, 15, 114, 1, 0, 0, 0, 17, 117, 1, 0, 0, 0, 19, 120, 1, 0, 0, 0, 21, 122, 1, 0, 0, 0, 23, 124, 1, 0, 0, 0, 25, 126, 1, 0, 0, 0, 27, 128, 1, 0, 0, 0, 29, 130, 1, 0, 0, 0, 31, 132, 1, 0, 0, 0, 33, 134, 1, 0, 0, 0, 35, 136, 1, 0, 0, 0, 37, 138, 1, 0, 0, 0, 39, 140, 1, 0, 0, 0, 41, 142, 1, 0, 0, 0, 43, 144, 1, 0, 0, 0, 45, 146, 1, 0, 0, 0, 47, 148, 1, 0, 0, 0, 49, 150, 1, 0, 0, 0, 51, 152, 1, 0, 0, 0, 53, 157, 1, 0, 0, 0, 55, 163, 1, 0, 0, 0, 57, 168, 1, 0, 0, 0, 59, 170, 1, 0, 0, 0, 61, 172, 1, 0, 0, 0, 63, 174, 1, 0, 0, 0, 65, 183, 1, 0, 0, 0, 67, 185, 1, 0, 0, 0, 69, 191, 1, 0, 0, 0, 71, 193, 1, 0, 0, 0, 73, 196, 1, 0, 0, 0, 75, 201, 1, 0, 0, 0, 77, 224, 1, 0, 0, 0, 79, 227, 1, 0, 0, 0, 81, 233, 1, 0, 0, 0, 83, 274, 1, 0, 0, 0, 85, 289, 1, 0, 0, 0, 87, 308, 1, 0, 0, 0, 89, 406, 1, 0, 0, 0, 91, 408, 1, 0, 0, 0, 93, 413, 1, 0, 0, 0, 95, 96, 5, 61, 0, 0, 96, 97, 5, 61, 0, 0, 97, 2, 1, 0, 0, 0, 98, 99, 5, 33, 0, 0, 99, 100, 5, 61, 0, 0, 100, 4, 1, 0, 0, 0, 101, 102, 5, 105, 0, 0, 102, 103, 5, 110, 0, 0, 103, 6, 1, 0, 0, 0, 104, 105, 5, 60, 0, 0, 105, 8, 1, 0, 0, 0, 106, 107, 5, 60, 0, 0, 107, 108, 5, 61, 0, 0, 108, 10, 1, 0, 0, 0, 109, 110, 5, 62, 0, 0, 110, 111, 5, 61, 0, 0, 111, 12, 1, 0, 0, 0, 112, 113, 5, 62, 0, 0, 113, 14, 1, 0, 0, 0, 114, 115, 5, 38, 0, 0, 115, 116, 5, 38, 0, 0, 116, 16, 1, 0, 0, 0, 117, 118, 5, 124, 0, 0, 118, 119, 5, 124, 0, 0, 119, 18, 1, 0, 0, 0, 120, 121, 5, 91, 0, 0, 121, 20, 1, 0, 0, 0, 122, 123, 5, 93, 0, 0, 123, 22, 1, 0, 0, 0, 124, 125, 5, 123, 0, 0, 125, 24, 1, 0, 0, 0, 126, 127, 5, 125, 0, 0, 127, 26, 1, 0, 0, 0, 128, 129, 5, 40, 0, 0, 129, 28, 1, 0, 0, 0, 130, 131, 5, 41, 0, 0, 131, 30, 1, 0, 0, 0, 132, 133, 5, 46, 0, 0, 133, 32, 1, 0, 0, 0, 134, 135, 5, 44, 0, 0, 135, 34, 1, 0, 0, 0, 136, 137, 5, 45, 0, 0, 137, 36, 1, 0, 0, 0, 138, 139, 5, 33, 0, 0, 139, 38, 1, 0, 0, 0, 140, 141, 5, 63, 0, 0, 141, 40, 1, 0, 0, 0, 142, 143, 5, 58, 0, 0, 143, 42, 1, 0, 0, 0, 144, 145, 5, 43, 0, 0, 145, 44, 1, 0, 0, 0, 146, 147, 5, 42, 0, 0, 147, 46, 1, 0, 0, 0, 148, 149, 5, 47, 0, 0, 149, 48, 1, 0, 0, 0, 150, 151, 5, 37, 0, 0, 151, 50, 1, 0, 0, 0, 152, 153, 5, 116, 0, 0, 153, 154, 5, 114, 0, 0, 154, 155, 5, 117, 0, 0, 155, 156, 5, 101, 0, 
0, 156, 52, 1, 0, 0, 0, 157, 158, 5, 102, 0, 0, 158, 159, 5, 97, 0, 0, 159, 160, 5, 108, 0, 0, 160, 161, 5, 115, 0, 0, 161, 162, 5, 101, 0, 0, 162, 54, 1, 0, 0, 0, 163, 164, 5, 110, 0, 0, 164, 165, 5, 117, 0, 0, 165, 166, 5, 108, 0, 0, 166, 167, 5, 108, 0, 0, 167, 56, 1, 0, 0, 0, 168, 169, 5, 92, 0, 0, 169, 58, 1, 0, 0, 0, 170, 171, 7, 0, 0, 0, 171, 60, 1, 0, 0, 0, 172, 173, 2, 48, 57, 0, 173, 62, 1, 0, 0, 0, 174, 176, 7, 1, 0, 0, 175, 177, 7, 2, 0, 0, 176, 175, 1, 0, 0, 0, 176, 177, 1, 0, 0, 0, 177, 179, 1, 0, 0, 0, 178, 180, 3, 61, 30, 0, 179, 178, 1, 0, 0, 0, 180, 181, 1, 0, 0, 0, 181, 179, 1, 0, 0, 0, 181, 182, 1, 0, 0, 0, 182, 64, 1, 0, 0, 0, 183, 184, 7, 3, 0, 0, 184, 66, 1, 0, 0, 0, 185, 186, 7, 4, 0, 0, 186, 68, 1, 0, 0, 0, 187, 192, 3, 71, 35, 0, 188, 192, 3, 75, 37, 0, 189, 192, 3, 77, 38, 0, 190, 192, 3, 73, 36, 0, 191, 187, 1, 0, 0, 0, 191, 188, 1, 0, 0, 0, 191, 189, 1, 0, 0, 0, 191, 190, 1, 0, 0, 0, 192, 70, 1, 0, 0, 0, 193, 194, 3, 57, 28, 0, 194, 195, 7, 5, 0, 0, 195, 72, 1, 0, 0, 0, 196, 197, 3, 57, 28, 0, 197, 198, 2, 48, 51, 0, 198, 199, 2, 48, 55, 0, 199, 200, 2, 48, 55, 0, 200, 74, 1, 0, 0, 0, 201, 202, 3, 57, 28, 0, 202, 203, 7, 6, 0, 0, 203, 204, 3, 65, 32, 0, 204, 205, 3, 65, 32, 0, 205, 76, 1, 0, 0, 0, 206, 207, 3, 57, 28, 0, 207, 208, 5, 117, 0, 0, 208, 209, 3, 65, 32, 0, 209, 210, 3, 65, 32, 0, 210, 211, 3, 65, 32, 0, 211, 212, 3, 65, 32, 0, 212, 225, 1, 0, 0, 0, 213, 214, 3, 57, 28, 0, 214, 215, 5, 85, 0, 0, 215, 216, 3, 65, 32, 0, 216, 217, 3, 65, 32, 0, 217, 218, 3, 65, 32, 0, 218, 219, 3, 65, 32, 0, 219, 220, 3, 65, 32, 0, 220, 221, 3, 65, 32, 0, 221, 222, 3, 65, 32, 0, 222, 223, 3, 65, 32, 0, 223, 225, 1, 0, 0, 0, 224, 206, 1, 0, 0, 0, 224, 213, 1, 0, 0, 0, 225, 78, 1, 0, 0, 0, 226, 228, 7, 7, 0, 0, 227, 226, 1, 0, 0, 0, 228, 229, 1, 0, 0, 0, 229, 227, 1, 0, 0, 0, 229, 230, 1, 0, 0, 0, 230, 231, 1, 0, 0, 0, 231, 232, 6, 39, 0, 0, 232, 80, 1, 0, 0, 0, 233, 234, 5, 47, 0, 0, 234, 235, 5, 47, 0, 0, 235, 239, 1, 0, 0, 0, 236, 238, 8, 8, 0, 0, 237, 236, 1, 0, 0, 0, 238, 241, 1, 0, 0, 0, 239, 237, 1, 0, 0, 0, 239, 240, 1, 0, 0, 0, 240, 242, 1, 0, 0, 0, 241, 239, 1, 0, 0, 0, 242, 243, 6, 40, 0, 0, 243, 82, 1, 0, 0, 0, 244, 246, 3, 61, 30, 0, 245, 244, 1, 0, 0, 0, 246, 247, 1, 0, 0, 0, 247, 245, 1, 0, 0, 0, 247, 248, 1, 0, 0, 0, 248, 249, 1, 0, 0, 0, 249, 251, 5, 46, 0, 0, 250, 252, 3, 61, 30, 0, 251, 250, 1, 0, 0, 0, 252, 253, 1, 0, 0, 0, 253, 251, 1, 0, 0, 0, 253, 254, 1, 0, 0, 0, 254, 256, 1, 0, 0, 0, 255, 257, 3, 63, 31, 0, 256, 255, 1, 0, 0, 0, 256, 257, 1, 0, 0, 0, 257, 275, 1, 0, 0, 0, 258, 260, 3, 61, 30, 0, 259, 258, 1, 0, 0, 0, 260, 261, 1, 0, 0, 0, 261, 259, 1, 0, 0, 0, 261, 262, 1, 0, 0, 0, 262, 263, 1, 0, 0, 0, 263, 264, 3, 63, 31, 0, 264, 275, 1, 0, 0, 0, 265, 267, 5, 46, 0, 0, 266, 268, 3, 61, 30, 0, 267, 266, 1, 0, 0, 0, 268, 269, 1, 0, 0, 0, 269, 267, 1, 0, 0, 0, 269, 270, 1, 0, 0, 0, 270, 272, 1, 0, 0, 0, 271, 273, 3, 63, 31, 0, 272, 271, 1, 0, 0, 0, 272, 273, 1, 0, 0, 0, 273, 275, 1, 0, 0, 0, 274, 245, 1, 0, 0, 0, 274, 259, 1, 0, 0, 0, 274, 265, 1, 0, 0, 0, 275, 84, 1, 0, 0, 0, 276, 278, 3, 61, 30, 0, 277, 276, 1, 0, 0, 0, 278, 279, 1, 0, 0, 0, 279, 277, 1, 0, 0, 0, 279, 280, 1, 0, 0, 0, 280, 290, 1, 0, 0, 0, 281, 282, 5, 48, 0, 0, 282, 283, 5, 120, 0, 0, 283, 285, 1, 0, 0, 0, 284, 286, 3, 65, 32, 0, 285, 284, 1, 0, 0, 0, 286, 287, 1, 0, 0, 0, 287, 285, 1, 0, 0, 0, 287, 288, 1, 0, 0, 0, 288, 290, 1, 0, 0, 0, 289, 277, 1, 0, 0, 0, 289, 281, 1, 0, 0, 0, 290, 86, 1, 0, 0, 0, 291, 293, 3, 61, 30, 0, 292, 291, 1, 0, 0, 0, 293, 294, 1, 0, 0, 0, 294, 292, 
1, 0, 0, 0, 294, 295, 1, 0, 0, 0, 295, 296, 1, 0, 0, 0, 296, 297, 7, 9, 0, 0, 297, 309, 1, 0, 0, 0, 298, 299, 5, 48, 0, 0, 299, 300, 5, 120, 0, 0, 300, 302, 1, 0, 0, 0, 301, 303, 3, 65, 32, 0, 302, 301, 1, 0, 0, 0, 303, 304, 1, 0, 0, 0, 304, 302, 1, 0, 0, 0, 304, 305, 1, 0, 0, 0, 305, 306, 1, 0, 0, 0, 306, 307, 7, 9, 0, 0, 307, 309, 1, 0, 0, 0, 308, 292, 1, 0, 0, 0, 308, 298, 1, 0, 0, 0, 309, 88, 1, 0, 0, 0, 310, 315, 5, 34, 0, 0, 311, 314, 3, 69, 34, 0, 312, 314, 8, 10, 0, 0, 313, 311, 1, 0, 0, 0, 313, 312, 1, 0, 0, 0, 314, 317, 1, 0, 0, 0, 315, 313, 1, 0, 0, 0, 315, 316, 1, 0, 0, 0, 316, 318, 1, 0, 0, 0, 317, 315, 1, 0, 0, 0, 318, 407, 5, 34, 0, 0, 319, 324, 5, 39, 0, 0, 320, 323, 3, 69, 34, 0, 321, 323, 8, 11, 0, 0, 322, 320, 1, 0, 0, 0, 322, 321, 1, 0, 0, 0, 323, 326, 1, 0, 0, 0, 324, 322, 1, 0, 0, 0, 324, 325, 1, 0, 0, 0, 325, 327, 1, 0, 0, 0, 326, 324, 1, 0, 0, 0, 327, 407, 5, 39, 0, 0, 328, 329, 5, 34, 0, 0, 329, 330, 5, 34, 0, 0, 330, 331, 5, 34, 0, 0, 331, 336, 1, 0, 0, 0, 332, 335, 3, 69, 34, 0, 333, 335, 8, 12, 0, 0, 334, 332, 1, 0, 0, 0, 334, 333, 1, 0, 0, 0, 335, 338, 1, 0, 0, 0, 336, 337, 1, 0, 0, 0, 336, 334, 1, 0, 0, 0, 337, 339, 1, 0, 0, 0, 338, 336, 1, 0, 0, 0, 339, 340, 5, 34, 0, 0, 340, 341, 5, 34, 0, 0, 341, 407, 5, 34, 0, 0, 342, 343, 5, 39, 0, 0, 343, 344, 5, 39, 0, 0, 344, 345, 5, 39, 0, 0, 345, 350, 1, 0, 0, 0, 346, 349, 3, 69, 34, 0, 347, 349, 8, 12, 0, 0, 348, 346, 1, 0, 0, 0, 348, 347, 1, 0, 0, 0, 349, 352, 1, 0, 0, 0, 350, 351, 1, 0, 0, 0, 350, 348, 1, 0, 0, 0, 351, 353, 1, 0, 0, 0, 352, 350, 1, 0, 0, 0, 353, 354, 5, 39, 0, 0, 354, 355, 5, 39, 0, 0, 355, 407, 5, 39, 0, 0, 356, 357, 3, 67, 33, 0, 357, 361, 5, 34, 0, 0, 358, 360, 8, 13, 0, 0, 359, 358, 1, 0, 0, 0, 360, 363, 1, 0, 0, 0, 361, 359, 1, 0, 0, 0, 361, 362, 1, 0, 0, 0, 362, 364, 1, 0, 0, 0, 363, 361, 1, 0, 0, 0, 364, 365, 5, 34, 0, 0, 365, 407, 1, 0, 0, 0, 366, 367, 3, 67, 33, 0, 367, 371, 5, 39, 0, 0, 368, 370, 8, 14, 0, 0, 369, 368, 1, 0, 0, 0, 370, 373, 1, 0, 0, 0, 371, 369, 1, 0, 0, 0, 371, 372, 1, 0, 0, 0, 372, 374, 1, 0, 0, 0, 373, 371, 1, 0, 0, 0, 374, 375, 5, 39, 0, 0, 375, 407, 1, 0, 0, 0, 376, 377, 3, 67, 33, 0, 377, 378, 5, 34, 0, 0, 378, 379, 5, 34, 0, 0, 379, 380, 5, 34, 0, 0, 380, 384, 1, 0, 0, 0, 381, 383, 9, 0, 0, 0, 382, 381, 1, 0, 0, 0, 383, 386, 1, 0, 0, 0, 384, 385, 1, 0, 0, 0, 384, 382, 1, 0, 0, 0, 385, 387, 1, 0, 0, 0, 386, 384, 1, 0, 0, 0, 387, 388, 5, 34, 0, 0, 388, 389, 5, 34, 0, 0, 389, 390, 5, 34, 0, 0, 390, 407, 1, 0, 0, 0, 391, 392, 3, 67, 33, 0, 392, 393, 5, 39, 0, 0, 393, 394, 5, 39, 0, 0, 394, 395, 5, 39, 0, 0, 395, 399, 1, 0, 0, 0, 396, 398, 9, 0, 0, 0, 397, 396, 1, 0, 0, 0, 398, 401, 1, 0, 0, 0, 399, 400, 1, 0, 0, 0, 399, 397, 1, 0, 0, 0, 400, 402, 1, 0, 0, 0, 401, 399, 1, 0, 0, 0, 402, 403, 5, 39, 0, 0, 403, 404, 5, 39, 0, 0, 404, 405, 5, 39, 0, 0, 405, 407, 1, 0, 0, 0, 406, 310, 1, 0, 0, 0, 406, 319, 1, 0, 0, 0, 406, 328, 1, 0, 0, 0, 406, 342, 1, 0, 0, 0, 406, 356, 1, 0, 0, 0, 406, 366, 1, 0, 0, 0, 406, 376, 1, 0, 0, 0, 406, 391, 1, 0, 0, 0, 407, 90, 1, 0, 0, 0, 408, 409, 7, 15, 0, 0, 409, 410, 3, 89, 44, 0, 410, 92, 1, 0, 0, 0, 411, 414, 3, 59, 29, 0, 412, 414, 5, 95, 0, 0, 413, 411, 1, 0, 0, 0, 413, 412, 1, 0, 0, 0, 414, 420, 1, 0, 0, 0, 415, 419, 3, 59, 29, 0, 416, 419, 3, 61, 30, 0, 417, 419, 5, 95, 0, 0, 418, 415, 1, 0, 0, 0, 418, 416, 1, 0, 0, 0, 418, 417, 1, 0, 0, 0, 419, 422, 1, 0, 0, 0, 420, 418, 1, 0, 0, 0, 420, 421, 1, 0, 0, 0, 421, 94, 1, 0, 0, 0, 422, 420, 1, 0, 0, 0, 36, 0, 176, 181, 191, 224, 229, 239, 247, 253, 256, 261, 269, 272, 274, 279, 287, 289, 
294, 304, 308, 313, 315, 322, 324, 334, 336, 348, 350, 361, 371, 384, 399, 406, 413, 418, 420, 1, 0, 1, 0]
\ No newline at end of file
diff --git a/vendor/github.com/google/cel-go/parser/gen/CELLexer.tokens b/vendor/github.com/google/cel-go/parser/gen/CELLexer.tokens
new file mode 100644
index 000000000..b305bdad3
--- /dev/null
+++ b/vendor/github.com/google/cel-go/parser/gen/CELLexer.tokens
@@ -0,0 +1,64 @@
+EQUALS=1
+NOT_EQUALS=2
+IN=3
+LESS=4
+LESS_EQUALS=5
+GREATER_EQUALS=6
+GREATER=7
+LOGICAL_AND=8
+LOGICAL_OR=9
+LBRACKET=10
+RPRACKET=11
+LBRACE=12
+RBRACE=13
+LPAREN=14
+RPAREN=15
+DOT=16
+COMMA=17
+MINUS=18
+EXCLAM=19
+QUESTIONMARK=20
+COLON=21
+PLUS=22
+STAR=23
+SLASH=24
+PERCENT=25
+CEL_TRUE=26
+CEL_FALSE=27
+NUL=28
+WHITESPACE=29
+COMMENT=30
+NUM_FLOAT=31
+NUM_INT=32
+NUM_UINT=33
+STRING=34
+BYTES=35
+IDENTIFIER=36
+'=='=1
+'!='=2
+'in'=3
+'<'=4
+'<='=5
+'>='=6
+'>'=7
+'&&'=8
+'||'=9
+'['=10
+']'=11
+'{'=12
+'}'=13
+'('=14
+')'=15
+'.'=16
+','=17
+'-'=18
+'!'=19
+'?'=20
+':'=21
+'+'=22
+'*'=23
+'/'=24
+'%'=25
+'true'=26
+'false'=27
+'null'=28
diff --git a/vendor/github.com/google/cel-go/parser/gen/cel_base_listener.go b/vendor/github.com/google/cel-go/parser/gen/cel_base_listener.go
new file mode 100644
index 000000000..c49d03867
--- /dev/null
+++ b/vendor/github.com/google/cel-go/parser/gen/cel_base_listener.go
@@ -0,0 +1,219 @@
+// Code generated from /usr/local/google/home/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.13.1. DO NOT EDIT.
+
+package gen // CEL
+import "github.com/antlr4-go/antlr/v4"
+
+// BaseCELListener is a complete listener for a parse tree produced by CELParser.
+type BaseCELListener struct{}
+
+var _ CELListener = &BaseCELListener{}
+
+// VisitTerminal is called when a terminal node is visited.
+func (s *BaseCELListener) VisitTerminal(node antlr.TerminalNode) {}
+
+// VisitErrorNode is called when an error node is visited.
+func (s *BaseCELListener) VisitErrorNode(node antlr.ErrorNode) {}
+
+// EnterEveryRule is called when any rule is entered.
+func (s *BaseCELListener) EnterEveryRule(ctx antlr.ParserRuleContext) {}
+
+// ExitEveryRule is called when any rule is exited.
+func (s *BaseCELListener) ExitEveryRule(ctx antlr.ParserRuleContext) {}
+
+// EnterStart is called when production start is entered.
+func (s *BaseCELListener) EnterStart(ctx *StartContext) {}
+
+// ExitStart is called when production start is exited.
+func (s *BaseCELListener) ExitStart(ctx *StartContext) {}
+
+// EnterExpr is called when production expr is entered.
+func (s *BaseCELListener) EnterExpr(ctx *ExprContext) {}
+
+// ExitExpr is called when production expr is exited.
+func (s *BaseCELListener) ExitExpr(ctx *ExprContext) {}
+
+// EnterConditionalOr is called when production conditionalOr is entered.
+func (s *BaseCELListener) EnterConditionalOr(ctx *ConditionalOrContext) {}
+
+// ExitConditionalOr is called when production conditionalOr is exited.
+func (s *BaseCELListener) ExitConditionalOr(ctx *ConditionalOrContext) {}
+
+// EnterConditionalAnd is called when production conditionalAnd is entered.
+func (s *BaseCELListener) EnterConditionalAnd(ctx *ConditionalAndContext) {}
+
+// ExitConditionalAnd is called when production conditionalAnd is exited.
+func (s *BaseCELListener) ExitConditionalAnd(ctx *ConditionalAndContext) {}
+
+// EnterRelation is called when production relation is entered.
+func (s *BaseCELListener) EnterRelation(ctx *RelationContext) {}
+
+// ExitRelation is called when production relation is exited.
+func (s *BaseCELListener) ExitRelation(ctx *RelationContext) {}
+
+// EnterCalc is called when production calc is entered.
+func (s *BaseCELListener) EnterCalc(ctx *CalcContext) {}
+
+// ExitCalc is called when production calc is exited.
+func (s *BaseCELListener) ExitCalc(ctx *CalcContext) {}
+
+// EnterMemberExpr is called when production MemberExpr is entered.
+func (s *BaseCELListener) EnterMemberExpr(ctx *MemberExprContext) {}
+
+// ExitMemberExpr is called when production MemberExpr is exited.
+func (s *BaseCELListener) ExitMemberExpr(ctx *MemberExprContext) {}
+
+// EnterLogicalNot is called when production LogicalNot is entered.
+func (s *BaseCELListener) EnterLogicalNot(ctx *LogicalNotContext) {}
+
+// ExitLogicalNot is called when production LogicalNot is exited.
+func (s *BaseCELListener) ExitLogicalNot(ctx *LogicalNotContext) {}
+
+// EnterNegate is called when production Negate is entered.
+func (s *BaseCELListener) EnterNegate(ctx *NegateContext) {}
+
+// ExitNegate is called when production Negate is exited.
+func (s *BaseCELListener) ExitNegate(ctx *NegateContext) {}
+
+// EnterMemberCall is called when production MemberCall is entered.
+func (s *BaseCELListener) EnterMemberCall(ctx *MemberCallContext) {}
+
+// ExitMemberCall is called when production MemberCall is exited.
+func (s *BaseCELListener) ExitMemberCall(ctx *MemberCallContext) {}
+
+// EnterSelect is called when production Select is entered.
+func (s *BaseCELListener) EnterSelect(ctx *SelectContext) {}
+
+// ExitSelect is called when production Select is exited.
+func (s *BaseCELListener) ExitSelect(ctx *SelectContext) {}
+
+// EnterPrimaryExpr is called when production PrimaryExpr is entered.
+func (s *BaseCELListener) EnterPrimaryExpr(ctx *PrimaryExprContext) {}
+
+// ExitPrimaryExpr is called when production PrimaryExpr is exited.
+func (s *BaseCELListener) ExitPrimaryExpr(ctx *PrimaryExprContext) {}
+
+// EnterIndex is called when production Index is entered.
+func (s *BaseCELListener) EnterIndex(ctx *IndexContext) {}
+
+// ExitIndex is called when production Index is exited.
+func (s *BaseCELListener) ExitIndex(ctx *IndexContext) {}
+
+// EnterIdentOrGlobalCall is called when production IdentOrGlobalCall is entered.
+func (s *BaseCELListener) EnterIdentOrGlobalCall(ctx *IdentOrGlobalCallContext) {}
+
+// ExitIdentOrGlobalCall is called when production IdentOrGlobalCall is exited.
+func (s *BaseCELListener) ExitIdentOrGlobalCall(ctx *IdentOrGlobalCallContext) {}
+
+// EnterNested is called when production Nested is entered.
+func (s *BaseCELListener) EnterNested(ctx *NestedContext) {}
+
+// ExitNested is called when production Nested is exited.
+func (s *BaseCELListener) ExitNested(ctx *NestedContext) {}
+
+// EnterCreateList is called when production CreateList is entered.
+func (s *BaseCELListener) EnterCreateList(ctx *CreateListContext) {}
+
+// ExitCreateList is called when production CreateList is exited.
+func (s *BaseCELListener) ExitCreateList(ctx *CreateListContext) {}
+
+// EnterCreateStruct is called when production CreateStruct is entered.
+func (s *BaseCELListener) EnterCreateStruct(ctx *CreateStructContext) {}
+
+// ExitCreateStruct is called when production CreateStruct is exited.
+func (s *BaseCELListener) ExitCreateStruct(ctx *CreateStructContext) {}
+
+// EnterCreateMessage is called when production CreateMessage is entered.
+func (s *BaseCELListener) EnterCreateMessage(ctx *CreateMessageContext) {}
+
+// ExitCreateMessage is called when production CreateMessage is exited.
+func (s *BaseCELListener) ExitCreateMessage(ctx *CreateMessageContext) {}
+
+// EnterConstantLiteral is called when production ConstantLiteral is entered.
+func (s *BaseCELListener) EnterConstantLiteral(ctx *ConstantLiteralContext) {}
+
+// ExitConstantLiteral is called when production ConstantLiteral is exited.
+func (s *BaseCELListener) ExitConstantLiteral(ctx *ConstantLiteralContext) {}
+
+// EnterExprList is called when production exprList is entered.
+func (s *BaseCELListener) EnterExprList(ctx *ExprListContext) {}
+
+// ExitExprList is called when production exprList is exited.
+func (s *BaseCELListener) ExitExprList(ctx *ExprListContext) {}
+
+// EnterListInit is called when production listInit is entered.
+func (s *BaseCELListener) EnterListInit(ctx *ListInitContext) {}
+
+// ExitListInit is called when production listInit is exited.
+func (s *BaseCELListener) ExitListInit(ctx *ListInitContext) {}
+
+// EnterFieldInitializerList is called when production fieldInitializerList is entered.
+func (s *BaseCELListener) EnterFieldInitializerList(ctx *FieldInitializerListContext) {}
+
+// ExitFieldInitializerList is called when production fieldInitializerList is exited.
+func (s *BaseCELListener) ExitFieldInitializerList(ctx *FieldInitializerListContext) {}
+
+// EnterOptField is called when production optField is entered.
+func (s *BaseCELListener) EnterOptField(ctx *OptFieldContext) {}
+
+// ExitOptField is called when production optField is exited.
+func (s *BaseCELListener) ExitOptField(ctx *OptFieldContext) {}
+
+// EnterMapInitializerList is called when production mapInitializerList is entered.
+func (s *BaseCELListener) EnterMapInitializerList(ctx *MapInitializerListContext) {}
+
+// ExitMapInitializerList is called when production mapInitializerList is exited.
+func (s *BaseCELListener) ExitMapInitializerList(ctx *MapInitializerListContext) {}
+
+// EnterOptExpr is called when production optExpr is entered.
+func (s *BaseCELListener) EnterOptExpr(ctx *OptExprContext) {}
+
+// ExitOptExpr is called when production optExpr is exited.
+func (s *BaseCELListener) ExitOptExpr(ctx *OptExprContext) {}
+
+// EnterInt is called when production Int is entered.
+func (s *BaseCELListener) EnterInt(ctx *IntContext) {}
+
+// ExitInt is called when production Int is exited.
+func (s *BaseCELListener) ExitInt(ctx *IntContext) {}
+
+// EnterUint is called when production Uint is entered.
+func (s *BaseCELListener) EnterUint(ctx *UintContext) {}
+
+// ExitUint is called when production Uint is exited.
+func (s *BaseCELListener) ExitUint(ctx *UintContext) {}
+
+// EnterDouble is called when production Double is entered.
+func (s *BaseCELListener) EnterDouble(ctx *DoubleContext) {}
+
+// ExitDouble is called when production Double is exited.
+func (s *BaseCELListener) ExitDouble(ctx *DoubleContext) {}
+
+// EnterString is called when production String is entered.
+func (s *BaseCELListener) EnterString(ctx *StringContext) {}
+
+// ExitString is called when production String is exited.
+func (s *BaseCELListener) ExitString(ctx *StringContext) {}
+
+// EnterBytes is called when production Bytes is entered.
+func (s *BaseCELListener) EnterBytes(ctx *BytesContext) {}
+
+// ExitBytes is called when production Bytes is exited.
+func (s *BaseCELListener) ExitBytes(ctx *BytesContext) {}
+
+// EnterBoolTrue is called when production BoolTrue is entered.
+func (s *BaseCELListener) EnterBoolTrue(ctx *BoolTrueContext) {}
+
+// ExitBoolTrue is called when production BoolTrue is exited.
+func (s *BaseCELListener) ExitBoolTrue(ctx *BoolTrueContext) {}
+
+// EnterBoolFalse is called when production BoolFalse is entered.
+func (s *BaseCELListener) EnterBoolFalse(ctx *BoolFalseContext) {}
+
+// ExitBoolFalse is called when production BoolFalse is exited.
+func (s *BaseCELListener) ExitBoolFalse(ctx *BoolFalseContext) {}
+
+// EnterNull is called when production Null is entered.
+func (s *BaseCELListener) EnterNull(ctx *NullContext) {}
+
+// ExitNull is called when production Null is exited.
+func (s *BaseCELListener) ExitNull(ctx *NullContext) {}
diff --git a/vendor/github.com/google/cel-go/parser/gen/cel_base_visitor.go b/vendor/github.com/google/cel-go/parser/gen/cel_base_visitor.go
new file mode 100644
index 000000000..b2c0783d3
--- /dev/null
+++ b/vendor/github.com/google/cel-go/parser/gen/cel_base_visitor.go
@@ -0,0 +1,141 @@
+// Code generated from /usr/local/google/home/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.13.1. DO NOT EDIT.
+
+package gen // CEL
+import "github.com/antlr4-go/antlr/v4"
+
+
+type BaseCELVisitor struct {
+ *antlr.BaseParseTreeVisitor
+}
+
+func (v *BaseCELVisitor) VisitStart(ctx *StartContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitExpr(ctx *ExprContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitConditionalOr(ctx *ConditionalOrContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitConditionalAnd(ctx *ConditionalAndContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitRelation(ctx *RelationContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitCalc(ctx *CalcContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitMemberExpr(ctx *MemberExprContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitLogicalNot(ctx *LogicalNotContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitNegate(ctx *NegateContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitMemberCall(ctx *MemberCallContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitSelect(ctx *SelectContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitPrimaryExpr(ctx *PrimaryExprContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitIndex(ctx *IndexContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitIdentOrGlobalCall(ctx *IdentOrGlobalCallContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitNested(ctx *NestedContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitCreateList(ctx *CreateListContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitCreateStruct(ctx *CreateStructContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitCreateMessage(ctx *CreateMessageContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitConstantLiteral(ctx *ConstantLiteralContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitExprList(ctx *ExprListContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitListInit(ctx *ListInitContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitFieldInitializerList(ctx *FieldInitializerListContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitOptField(ctx *OptFieldContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitMapInitializerList(ctx *MapInitializerListContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitOptExpr(ctx *OptExprContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitInt(ctx *IntContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitUint(ctx *UintContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitDouble(ctx *DoubleContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitString(ctx *StringContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitBytes(ctx *BytesContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitBoolTrue(ctx *BoolTrueContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitBoolFalse(ctx *BoolFalseContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitNull(ctx *NullContext) interface{} {
+ return v.VisitChildren(ctx)
+}
diff --git a/vendor/github.com/google/cel-go/parser/gen/cel_lexer.go b/vendor/github.com/google/cel-go/parser/gen/cel_lexer.go
new file mode 100644
index 000000000..e026cc46f
--- /dev/null
+++ b/vendor/github.com/google/cel-go/parser/gen/cel_lexer.go
@@ -0,0 +1,344 @@
+// Code generated from /usr/local/google/home/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.13.1. DO NOT EDIT.
+
+package gen
+import (
+ "fmt"
+ "sync"
+ "unicode"
+ "github.com/antlr4-go/antlr/v4"
+)
+// Suppress unused import error
+var _ = fmt.Printf
+var _ = sync.Once{}
+var _ = unicode.IsLetter
+
+
+type CELLexer struct {
+ *antlr.BaseLexer
+ channelNames []string
+ modeNames []string
+ // TODO: EOF string
+}
+
+var CELLexerLexerStaticData struct {
+ once sync.Once
+ serializedATN []int32
+ ChannelNames []string
+ ModeNames []string
+ LiteralNames []string
+ SymbolicNames []string
+ RuleNames []string
+ PredictionContextCache *antlr.PredictionContextCache
+ atn *antlr.ATN
+ decisionToDFA []*antlr.DFA
+}
+
+func cellexerLexerInit() {
+ staticData := &CELLexerLexerStaticData
+ staticData.ChannelNames = []string{
+ "DEFAULT_TOKEN_CHANNEL", "HIDDEN",
+ }
+ staticData.ModeNames = []string{
+ "DEFAULT_MODE",
+ }
+ staticData.LiteralNames = []string{
+ "", "'=='", "'!='", "'in'", "'<'", "'<='", "'>='", "'>'", "'&&'", "'||'",
+ "'['", "']'", "'{'", "'}'", "'('", "')'", "'.'", "','", "'-'", "'!'",
+ "'?'", "':'", "'+'", "'*'", "'/'", "'%'", "'true'", "'false'", "'null'",
+ }
+ staticData.SymbolicNames = []string{
+ "", "EQUALS", "NOT_EQUALS", "IN", "LESS", "LESS_EQUALS", "GREATER_EQUALS",
+ "GREATER", "LOGICAL_AND", "LOGICAL_OR", "LBRACKET", "RPRACKET", "LBRACE",
+ "RBRACE", "LPAREN", "RPAREN", "DOT", "COMMA", "MINUS", "EXCLAM", "QUESTIONMARK",
+ "COLON", "PLUS", "STAR", "SLASH", "PERCENT", "CEL_TRUE", "CEL_FALSE",
+ "NUL", "WHITESPACE", "COMMENT", "NUM_FLOAT", "NUM_INT", "NUM_UINT",
+ "STRING", "BYTES", "IDENTIFIER",
+ }
+ staticData.RuleNames = []string{
+ "EQUALS", "NOT_EQUALS", "IN", "LESS", "LESS_EQUALS", "GREATER_EQUALS",
+ "GREATER", "LOGICAL_AND", "LOGICAL_OR", "LBRACKET", "RPRACKET", "LBRACE",
+ "RBRACE", "LPAREN", "RPAREN", "DOT", "COMMA", "MINUS", "EXCLAM", "QUESTIONMARK",
+ "COLON", "PLUS", "STAR", "SLASH", "PERCENT", "CEL_TRUE", "CEL_FALSE",
+ "NUL", "BACKSLASH", "LETTER", "DIGIT", "EXPONENT", "HEXDIGIT", "RAW",
+ "ESC_SEQ", "ESC_CHAR_SEQ", "ESC_OCT_SEQ", "ESC_BYTE_SEQ", "ESC_UNI_SEQ",
+ "WHITESPACE", "COMMENT", "NUM_FLOAT", "NUM_INT", "NUM_UINT", "STRING",
+ "BYTES", "IDENTIFIER",
+ }
+ staticData.PredictionContextCache = antlr.NewPredictionContextCache()
+ staticData.serializedATN = []int32{
+ 4, 0, 36, 423, 6, -1, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2,
+ 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2,
+ 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15,
+ 7, 15, 2, 16, 7, 16, 2, 17, 7, 17, 2, 18, 7, 18, 2, 19, 7, 19, 2, 20, 7,
+ 20, 2, 21, 7, 21, 2, 22, 7, 22, 2, 23, 7, 23, 2, 24, 7, 24, 2, 25, 7, 25,
+ 2, 26, 7, 26, 2, 27, 7, 27, 2, 28, 7, 28, 2, 29, 7, 29, 2, 30, 7, 30, 2,
+ 31, 7, 31, 2, 32, 7, 32, 2, 33, 7, 33, 2, 34, 7, 34, 2, 35, 7, 35, 2, 36,
+ 7, 36, 2, 37, 7, 37, 2, 38, 7, 38, 2, 39, 7, 39, 2, 40, 7, 40, 2, 41, 7,
+ 41, 2, 42, 7, 42, 2, 43, 7, 43, 2, 44, 7, 44, 2, 45, 7, 45, 2, 46, 7, 46,
+ 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 1, 2, 1, 3, 1, 3, 1, 4,
+ 1, 4, 1, 4, 1, 5, 1, 5, 1, 5, 1, 6, 1, 6, 1, 7, 1, 7, 1, 7, 1, 8, 1, 8,
+ 1, 8, 1, 9, 1, 9, 1, 10, 1, 10, 1, 11, 1, 11, 1, 12, 1, 12, 1, 13, 1, 13,
+ 1, 14, 1, 14, 1, 15, 1, 15, 1, 16, 1, 16, 1, 17, 1, 17, 1, 18, 1, 18, 1,
+ 19, 1, 19, 1, 20, 1, 20, 1, 21, 1, 21, 1, 22, 1, 22, 1, 23, 1, 23, 1, 24,
+ 1, 24, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1, 26, 1, 26, 1, 26, 1, 26, 1,
+ 26, 1, 26, 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 28, 1, 28, 1, 29, 1, 29,
+ 1, 30, 1, 30, 1, 31, 1, 31, 3, 31, 177, 8, 31, 1, 31, 4, 31, 180, 8, 31,
+ 11, 31, 12, 31, 181, 1, 32, 1, 32, 1, 33, 1, 33, 1, 34, 1, 34, 1, 34, 1,
+ 34, 3, 34, 192, 8, 34, 1, 35, 1, 35, 1, 35, 1, 36, 1, 36, 1, 36, 1, 36,
+ 1, 36, 1, 37, 1, 37, 1, 37, 1, 37, 1, 37, 1, 38, 1, 38, 1, 38, 1, 38, 1,
+ 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38,
+ 1, 38, 1, 38, 1, 38, 3, 38, 225, 8, 38, 1, 39, 4, 39, 228, 8, 39, 11, 39,
+ 12, 39, 229, 1, 39, 1, 39, 1, 40, 1, 40, 1, 40, 1, 40, 5, 40, 238, 8, 40,
+ 10, 40, 12, 40, 241, 9, 40, 1, 40, 1, 40, 1, 41, 4, 41, 246, 8, 41, 11,
+ 41, 12, 41, 247, 1, 41, 1, 41, 4, 41, 252, 8, 41, 11, 41, 12, 41, 253,
+ 1, 41, 3, 41, 257, 8, 41, 1, 41, 4, 41, 260, 8, 41, 11, 41, 12, 41, 261,
+ 1, 41, 1, 41, 1, 41, 1, 41, 4, 41, 268, 8, 41, 11, 41, 12, 41, 269, 1,
+ 41, 3, 41, 273, 8, 41, 3, 41, 275, 8, 41, 1, 42, 4, 42, 278, 8, 42, 11,
+ 42, 12, 42, 279, 1, 42, 1, 42, 1, 42, 1, 42, 4, 42, 286, 8, 42, 11, 42,
+ 12, 42, 287, 3, 42, 290, 8, 42, 1, 43, 4, 43, 293, 8, 43, 11, 43, 12, 43,
+ 294, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 4, 43, 303, 8, 43, 11, 43,
+ 12, 43, 304, 1, 43, 1, 43, 3, 43, 309, 8, 43, 1, 44, 1, 44, 1, 44, 5, 44,
+ 314, 8, 44, 10, 44, 12, 44, 317, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5,
+ 44, 323, 8, 44, 10, 44, 12, 44, 326, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44,
+ 1, 44, 1, 44, 1, 44, 5, 44, 335, 8, 44, 10, 44, 12, 44, 338, 9, 44, 1,
+ 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 349,
+ 8, 44, 10, 44, 12, 44, 352, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1,
+ 44, 5, 44, 360, 8, 44, 10, 44, 12, 44, 363, 9, 44, 1, 44, 1, 44, 1, 44,
+ 1, 44, 1, 44, 5, 44, 370, 8, 44, 10, 44, 12, 44, 373, 9, 44, 1, 44, 1,
+ 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 383, 8, 44, 10, 44,
+ 12, 44, 386, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1,
+ 44, 1, 44, 1, 44, 5, 44, 398, 8, 44, 10, 44, 12, 44, 401, 9, 44, 1, 44,
+ 1, 44, 1, 44, 1, 44, 3, 44, 407, 8, 44, 1, 45, 1, 45, 1, 45, 1, 46, 1,
+ 46, 3, 46, 414, 8, 46, 1, 46, 1, 46, 1, 46, 5, 46, 419, 8, 46, 10, 46,
+ 12, 46, 422, 9, 46, 4, 336, 350, 384, 399, 0, 47, 1, 1, 3, 2, 5, 3, 7,
+ 4, 9, 5, 11, 6, 13, 7, 15, 8, 17, 9, 19, 10, 21, 11, 23, 12, 25, 13, 27,
+ 14, 29, 15, 31, 16, 33, 17, 35, 18, 37, 19, 39, 20, 41, 21, 43, 22, 45,
+ 23, 47, 24, 49, 25, 51, 26, 53, 27, 55, 28, 57, 0, 59, 0, 61, 0, 63, 0,
+ 65, 0, 67, 0, 69, 0, 71, 0, 73, 0, 75, 0, 77, 0, 79, 29, 81, 30, 83, 31,
+ 85, 32, 87, 33, 89, 34, 91, 35, 93, 36, 1, 0, 16, 2, 0, 65, 90, 97, 122,
+ 2, 0, 69, 69, 101, 101, 2, 0, 43, 43, 45, 45, 3, 0, 48, 57, 65, 70, 97,
+ 102, 2, 0, 82, 82, 114, 114, 10, 0, 34, 34, 39, 39, 63, 63, 92, 92, 96,
+ 98, 102, 102, 110, 110, 114, 114, 116, 116, 118, 118, 2, 0, 88, 88, 120,
+ 120, 3, 0, 9, 10, 12, 13, 32, 32, 1, 0, 10, 10, 2, 0, 85, 85, 117, 117,
+ 4, 0, 10, 10, 13, 13, 34, 34, 92, 92, 4, 0, 10, 10, 13, 13, 39, 39, 92,
+ 92, 1, 0, 92, 92, 3, 0, 10, 10, 13, 13, 34, 34, 3, 0, 10, 10, 13, 13, 39,
+ 39, 2, 0, 66, 66, 98, 98, 456, 0, 1, 1, 0, 0, 0, 0, 3, 1, 0, 0, 0, 0, 5,
+ 1, 0, 0, 0, 0, 7, 1, 0, 0, 0, 0, 9, 1, 0, 0, 0, 0, 11, 1, 0, 0, 0, 0, 13,
+ 1, 0, 0, 0, 0, 15, 1, 0, 0, 0, 0, 17, 1, 0, 0, 0, 0, 19, 1, 0, 0, 0, 0,
+ 21, 1, 0, 0, 0, 0, 23, 1, 0, 0, 0, 0, 25, 1, 0, 0, 0, 0, 27, 1, 0, 0, 0,
+ 0, 29, 1, 0, 0, 0, 0, 31, 1, 0, 0, 0, 0, 33, 1, 0, 0, 0, 0, 35, 1, 0, 0,
+ 0, 0, 37, 1, 0, 0, 0, 0, 39, 1, 0, 0, 0, 0, 41, 1, 0, 0, 0, 0, 43, 1, 0,
+ 0, 0, 0, 45, 1, 0, 0, 0, 0, 47, 1, 0, 0, 0, 0, 49, 1, 0, 0, 0, 0, 51, 1,
+ 0, 0, 0, 0, 53, 1, 0, 0, 0, 0, 55, 1, 0, 0, 0, 0, 79, 1, 0, 0, 0, 0, 81,
+ 1, 0, 0, 0, 0, 83, 1, 0, 0, 0, 0, 85, 1, 0, 0, 0, 0, 87, 1, 0, 0, 0, 0,
+ 89, 1, 0, 0, 0, 0, 91, 1, 0, 0, 0, 0, 93, 1, 0, 0, 0, 1, 95, 1, 0, 0, 0,
+ 3, 98, 1, 0, 0, 0, 5, 101, 1, 0, 0, 0, 7, 104, 1, 0, 0, 0, 9, 106, 1, 0,
+ 0, 0, 11, 109, 1, 0, 0, 0, 13, 112, 1, 0, 0, 0, 15, 114, 1, 0, 0, 0, 17,
+ 117, 1, 0, 0, 0, 19, 120, 1, 0, 0, 0, 21, 122, 1, 0, 0, 0, 23, 124, 1,
+ 0, 0, 0, 25, 126, 1, 0, 0, 0, 27, 128, 1, 0, 0, 0, 29, 130, 1, 0, 0, 0,
+ 31, 132, 1, 0, 0, 0, 33, 134, 1, 0, 0, 0, 35, 136, 1, 0, 0, 0, 37, 138,
+ 1, 0, 0, 0, 39, 140, 1, 0, 0, 0, 41, 142, 1, 0, 0, 0, 43, 144, 1, 0, 0,
+ 0, 45, 146, 1, 0, 0, 0, 47, 148, 1, 0, 0, 0, 49, 150, 1, 0, 0, 0, 51, 152,
+ 1, 0, 0, 0, 53, 157, 1, 0, 0, 0, 55, 163, 1, 0, 0, 0, 57, 168, 1, 0, 0,
+ 0, 59, 170, 1, 0, 0, 0, 61, 172, 1, 0, 0, 0, 63, 174, 1, 0, 0, 0, 65, 183,
+ 1, 0, 0, 0, 67, 185, 1, 0, 0, 0, 69, 191, 1, 0, 0, 0, 71, 193, 1, 0, 0,
+ 0, 73, 196, 1, 0, 0, 0, 75, 201, 1, 0, 0, 0, 77, 224, 1, 0, 0, 0, 79, 227,
+ 1, 0, 0, 0, 81, 233, 1, 0, 0, 0, 83, 274, 1, 0, 0, 0, 85, 289, 1, 0, 0,
+ 0, 87, 308, 1, 0, 0, 0, 89, 406, 1, 0, 0, 0, 91, 408, 1, 0, 0, 0, 93, 413,
+ 1, 0, 0, 0, 95, 96, 5, 61, 0, 0, 96, 97, 5, 61, 0, 0, 97, 2, 1, 0, 0, 0,
+ 98, 99, 5, 33, 0, 0, 99, 100, 5, 61, 0, 0, 100, 4, 1, 0, 0, 0, 101, 102,
+ 5, 105, 0, 0, 102, 103, 5, 110, 0, 0, 103, 6, 1, 0, 0, 0, 104, 105, 5,
+ 60, 0, 0, 105, 8, 1, 0, 0, 0, 106, 107, 5, 60, 0, 0, 107, 108, 5, 61, 0,
+ 0, 108, 10, 1, 0, 0, 0, 109, 110, 5, 62, 0, 0, 110, 111, 5, 61, 0, 0, 111,
+ 12, 1, 0, 0, 0, 112, 113, 5, 62, 0, 0, 113, 14, 1, 0, 0, 0, 114, 115, 5,
+ 38, 0, 0, 115, 116, 5, 38, 0, 0, 116, 16, 1, 0, 0, 0, 117, 118, 5, 124,
+ 0, 0, 118, 119, 5, 124, 0, 0, 119, 18, 1, 0, 0, 0, 120, 121, 5, 91, 0,
+ 0, 121, 20, 1, 0, 0, 0, 122, 123, 5, 93, 0, 0, 123, 22, 1, 0, 0, 0, 124,
+ 125, 5, 123, 0, 0, 125, 24, 1, 0, 0, 0, 126, 127, 5, 125, 0, 0, 127, 26,
+ 1, 0, 0, 0, 128, 129, 5, 40, 0, 0, 129, 28, 1, 0, 0, 0, 130, 131, 5, 41,
+ 0, 0, 131, 30, 1, 0, 0, 0, 132, 133, 5, 46, 0, 0, 133, 32, 1, 0, 0, 0,
+ 134, 135, 5, 44, 0, 0, 135, 34, 1, 0, 0, 0, 136, 137, 5, 45, 0, 0, 137,
+ 36, 1, 0, 0, 0, 138, 139, 5, 33, 0, 0, 139, 38, 1, 0, 0, 0, 140, 141, 5,
+ 63, 0, 0, 141, 40, 1, 0, 0, 0, 142, 143, 5, 58, 0, 0, 143, 42, 1, 0, 0,
+ 0, 144, 145, 5, 43, 0, 0, 145, 44, 1, 0, 0, 0, 146, 147, 5, 42, 0, 0, 147,
+ 46, 1, 0, 0, 0, 148, 149, 5, 47, 0, 0, 149, 48, 1, 0, 0, 0, 150, 151, 5,
+ 37, 0, 0, 151, 50, 1, 0, 0, 0, 152, 153, 5, 116, 0, 0, 153, 154, 5, 114,
+ 0, 0, 154, 155, 5, 117, 0, 0, 155, 156, 5, 101, 0, 0, 156, 52, 1, 0, 0,
+ 0, 157, 158, 5, 102, 0, 0, 158, 159, 5, 97, 0, 0, 159, 160, 5, 108, 0,
+ 0, 160, 161, 5, 115, 0, 0, 161, 162, 5, 101, 0, 0, 162, 54, 1, 0, 0, 0,
+ 163, 164, 5, 110, 0, 0, 164, 165, 5, 117, 0, 0, 165, 166, 5, 108, 0, 0,
+ 166, 167, 5, 108, 0, 0, 167, 56, 1, 0, 0, 0, 168, 169, 5, 92, 0, 0, 169,
+ 58, 1, 0, 0, 0, 170, 171, 7, 0, 0, 0, 171, 60, 1, 0, 0, 0, 172, 173, 2,
+ 48, 57, 0, 173, 62, 1, 0, 0, 0, 174, 176, 7, 1, 0, 0, 175, 177, 7, 2, 0,
+ 0, 176, 175, 1, 0, 0, 0, 176, 177, 1, 0, 0, 0, 177, 179, 1, 0, 0, 0, 178,
+ 180, 3, 61, 30, 0, 179, 178, 1, 0, 0, 0, 180, 181, 1, 0, 0, 0, 181, 179,
+ 1, 0, 0, 0, 181, 182, 1, 0, 0, 0, 182, 64, 1, 0, 0, 0, 183, 184, 7, 3,
+ 0, 0, 184, 66, 1, 0, 0, 0, 185, 186, 7, 4, 0, 0, 186, 68, 1, 0, 0, 0, 187,
+ 192, 3, 71, 35, 0, 188, 192, 3, 75, 37, 0, 189, 192, 3, 77, 38, 0, 190,
+ 192, 3, 73, 36, 0, 191, 187, 1, 0, 0, 0, 191, 188, 1, 0, 0, 0, 191, 189,
+ 1, 0, 0, 0, 191, 190, 1, 0, 0, 0, 192, 70, 1, 0, 0, 0, 193, 194, 3, 57,
+ 28, 0, 194, 195, 7, 5, 0, 0, 195, 72, 1, 0, 0, 0, 196, 197, 3, 57, 28,
+ 0, 197, 198, 2, 48, 51, 0, 198, 199, 2, 48, 55, 0, 199, 200, 2, 48, 55,
+ 0, 200, 74, 1, 0, 0, 0, 201, 202, 3, 57, 28, 0, 202, 203, 7, 6, 0, 0, 203,
+ 204, 3, 65, 32, 0, 204, 205, 3, 65, 32, 0, 205, 76, 1, 0, 0, 0, 206, 207,
+ 3, 57, 28, 0, 207, 208, 5, 117, 0, 0, 208, 209, 3, 65, 32, 0, 209, 210,
+ 3, 65, 32, 0, 210, 211, 3, 65, 32, 0, 211, 212, 3, 65, 32, 0, 212, 225,
+ 1, 0, 0, 0, 213, 214, 3, 57, 28, 0, 214, 215, 5, 85, 0, 0, 215, 216, 3,
+ 65, 32, 0, 216, 217, 3, 65, 32, 0, 217, 218, 3, 65, 32, 0, 218, 219, 3,
+ 65, 32, 0, 219, 220, 3, 65, 32, 0, 220, 221, 3, 65, 32, 0, 221, 222, 3,
+ 65, 32, 0, 222, 223, 3, 65, 32, 0, 223, 225, 1, 0, 0, 0, 224, 206, 1, 0,
+ 0, 0, 224, 213, 1, 0, 0, 0, 225, 78, 1, 0, 0, 0, 226, 228, 7, 7, 0, 0,
+ 227, 226, 1, 0, 0, 0, 228, 229, 1, 0, 0, 0, 229, 227, 1, 0, 0, 0, 229,
+ 230, 1, 0, 0, 0, 230, 231, 1, 0, 0, 0, 231, 232, 6, 39, 0, 0, 232, 80,
+ 1, 0, 0, 0, 233, 234, 5, 47, 0, 0, 234, 235, 5, 47, 0, 0, 235, 239, 1,
+ 0, 0, 0, 236, 238, 8, 8, 0, 0, 237, 236, 1, 0, 0, 0, 238, 241, 1, 0, 0,
+ 0, 239, 237, 1, 0, 0, 0, 239, 240, 1, 0, 0, 0, 240, 242, 1, 0, 0, 0, 241,
+ 239, 1, 0, 0, 0, 242, 243, 6, 40, 0, 0, 243, 82, 1, 0, 0, 0, 244, 246,
+ 3, 61, 30, 0, 245, 244, 1, 0, 0, 0, 246, 247, 1, 0, 0, 0, 247, 245, 1,
+ 0, 0, 0, 247, 248, 1, 0, 0, 0, 248, 249, 1, 0, 0, 0, 249, 251, 5, 46, 0,
+ 0, 250, 252, 3, 61, 30, 0, 251, 250, 1, 0, 0, 0, 252, 253, 1, 0, 0, 0,
+ 253, 251, 1, 0, 0, 0, 253, 254, 1, 0, 0, 0, 254, 256, 1, 0, 0, 0, 255,
+ 257, 3, 63, 31, 0, 256, 255, 1, 0, 0, 0, 256, 257, 1, 0, 0, 0, 257, 275,
+ 1, 0, 0, 0, 258, 260, 3, 61, 30, 0, 259, 258, 1, 0, 0, 0, 260, 261, 1,
+ 0, 0, 0, 261, 259, 1, 0, 0, 0, 261, 262, 1, 0, 0, 0, 262, 263, 1, 0, 0,
+ 0, 263, 264, 3, 63, 31, 0, 264, 275, 1, 0, 0, 0, 265, 267, 5, 46, 0, 0,
+ 266, 268, 3, 61, 30, 0, 267, 266, 1, 0, 0, 0, 268, 269, 1, 0, 0, 0, 269,
+ 267, 1, 0, 0, 0, 269, 270, 1, 0, 0, 0, 270, 272, 1, 0, 0, 0, 271, 273,
+ 3, 63, 31, 0, 272, 271, 1, 0, 0, 0, 272, 273, 1, 0, 0, 0, 273, 275, 1,
+ 0, 0, 0, 274, 245, 1, 0, 0, 0, 274, 259, 1, 0, 0, 0, 274, 265, 1, 0, 0,
+ 0, 275, 84, 1, 0, 0, 0, 276, 278, 3, 61, 30, 0, 277, 276, 1, 0, 0, 0, 278,
+ 279, 1, 0, 0, 0, 279, 277, 1, 0, 0, 0, 279, 280, 1, 0, 0, 0, 280, 290,
+ 1, 0, 0, 0, 281, 282, 5, 48, 0, 0, 282, 283, 5, 120, 0, 0, 283, 285, 1,
+ 0, 0, 0, 284, 286, 3, 65, 32, 0, 285, 284, 1, 0, 0, 0, 286, 287, 1, 0,
+ 0, 0, 287, 285, 1, 0, 0, 0, 287, 288, 1, 0, 0, 0, 288, 290, 1, 0, 0, 0,
+ 289, 277, 1, 0, 0, 0, 289, 281, 1, 0, 0, 0, 290, 86, 1, 0, 0, 0, 291, 293,
+ 3, 61, 30, 0, 292, 291, 1, 0, 0, 0, 293, 294, 1, 0, 0, 0, 294, 292, 1,
+ 0, 0, 0, 294, 295, 1, 0, 0, 0, 295, 296, 1, 0, 0, 0, 296, 297, 7, 9, 0,
+ 0, 297, 309, 1, 0, 0, 0, 298, 299, 5, 48, 0, 0, 299, 300, 5, 120, 0, 0,
+ 300, 302, 1, 0, 0, 0, 301, 303, 3, 65, 32, 0, 302, 301, 1, 0, 0, 0, 303,
+ 304, 1, 0, 0, 0, 304, 302, 1, 0, 0, 0, 304, 305, 1, 0, 0, 0, 305, 306,
+ 1, 0, 0, 0, 306, 307, 7, 9, 0, 0, 307, 309, 1, 0, 0, 0, 308, 292, 1, 0,
+ 0, 0, 308, 298, 1, 0, 0, 0, 309, 88, 1, 0, 0, 0, 310, 315, 5, 34, 0, 0,
+ 311, 314, 3, 69, 34, 0, 312, 314, 8, 10, 0, 0, 313, 311, 1, 0, 0, 0, 313,
+ 312, 1, 0, 0, 0, 314, 317, 1, 0, 0, 0, 315, 313, 1, 0, 0, 0, 315, 316,
+ 1, 0, 0, 0, 316, 318, 1, 0, 0, 0, 317, 315, 1, 0, 0, 0, 318, 407, 5, 34,
+ 0, 0, 319, 324, 5, 39, 0, 0, 320, 323, 3, 69, 34, 0, 321, 323, 8, 11, 0,
+ 0, 322, 320, 1, 0, 0, 0, 322, 321, 1, 0, 0, 0, 323, 326, 1, 0, 0, 0, 324,
+ 322, 1, 0, 0, 0, 324, 325, 1, 0, 0, 0, 325, 327, 1, 0, 0, 0, 326, 324,
+ 1, 0, 0, 0, 327, 407, 5, 39, 0, 0, 328, 329, 5, 34, 0, 0, 329, 330, 5,
+ 34, 0, 0, 330, 331, 5, 34, 0, 0, 331, 336, 1, 0, 0, 0, 332, 335, 3, 69,
+ 34, 0, 333, 335, 8, 12, 0, 0, 334, 332, 1, 0, 0, 0, 334, 333, 1, 0, 0,
+ 0, 335, 338, 1, 0, 0, 0, 336, 337, 1, 0, 0, 0, 336, 334, 1, 0, 0, 0, 337,
+ 339, 1, 0, 0, 0, 338, 336, 1, 0, 0, 0, 339, 340, 5, 34, 0, 0, 340, 341,
+ 5, 34, 0, 0, 341, 407, 5, 34, 0, 0, 342, 343, 5, 39, 0, 0, 343, 344, 5,
+ 39, 0, 0, 344, 345, 5, 39, 0, 0, 345, 350, 1, 0, 0, 0, 346, 349, 3, 69,
+ 34, 0, 347, 349, 8, 12, 0, 0, 348, 346, 1, 0, 0, 0, 348, 347, 1, 0, 0,
+ 0, 349, 352, 1, 0, 0, 0, 350, 351, 1, 0, 0, 0, 350, 348, 1, 0, 0, 0, 351,
+ 353, 1, 0, 0, 0, 352, 350, 1, 0, 0, 0, 353, 354, 5, 39, 0, 0, 354, 355,
+ 5, 39, 0, 0, 355, 407, 5, 39, 0, 0, 356, 357, 3, 67, 33, 0, 357, 361, 5,
+ 34, 0, 0, 358, 360, 8, 13, 0, 0, 359, 358, 1, 0, 0, 0, 360, 363, 1, 0,
+ 0, 0, 361, 359, 1, 0, 0, 0, 361, 362, 1, 0, 0, 0, 362, 364, 1, 0, 0, 0,
+ 363, 361, 1, 0, 0, 0, 364, 365, 5, 34, 0, 0, 365, 407, 1, 0, 0, 0, 366,
+ 367, 3, 67, 33, 0, 367, 371, 5, 39, 0, 0, 368, 370, 8, 14, 0, 0, 369, 368,
+ 1, 0, 0, 0, 370, 373, 1, 0, 0, 0, 371, 369, 1, 0, 0, 0, 371, 372, 1, 0,
+ 0, 0, 372, 374, 1, 0, 0, 0, 373, 371, 1, 0, 0, 0, 374, 375, 5, 39, 0, 0,
+ 375, 407, 1, 0, 0, 0, 376, 377, 3, 67, 33, 0, 377, 378, 5, 34, 0, 0, 378,
+ 379, 5, 34, 0, 0, 379, 380, 5, 34, 0, 0, 380, 384, 1, 0, 0, 0, 381, 383,
+ 9, 0, 0, 0, 382, 381, 1, 0, 0, 0, 383, 386, 1, 0, 0, 0, 384, 385, 1, 0,
+ 0, 0, 384, 382, 1, 0, 0, 0, 385, 387, 1, 0, 0, 0, 386, 384, 1, 0, 0, 0,
+ 387, 388, 5, 34, 0, 0, 388, 389, 5, 34, 0, 0, 389, 390, 5, 34, 0, 0, 390,
+ 407, 1, 0, 0, 0, 391, 392, 3, 67, 33, 0, 392, 393, 5, 39, 0, 0, 393, 394,
+ 5, 39, 0, 0, 394, 395, 5, 39, 0, 0, 395, 399, 1, 0, 0, 0, 396, 398, 9,
+ 0, 0, 0, 397, 396, 1, 0, 0, 0, 398, 401, 1, 0, 0, 0, 399, 400, 1, 0, 0,
+ 0, 399, 397, 1, 0, 0, 0, 400, 402, 1, 0, 0, 0, 401, 399, 1, 0, 0, 0, 402,
+ 403, 5, 39, 0, 0, 403, 404, 5, 39, 0, 0, 404, 405, 5, 39, 0, 0, 405, 407,
+ 1, 0, 0, 0, 406, 310, 1, 0, 0, 0, 406, 319, 1, 0, 0, 0, 406, 328, 1, 0,
+ 0, 0, 406, 342, 1, 0, 0, 0, 406, 356, 1, 0, 0, 0, 406, 366, 1, 0, 0, 0,
+ 406, 376, 1, 0, 0, 0, 406, 391, 1, 0, 0, 0, 407, 90, 1, 0, 0, 0, 408, 409,
+ 7, 15, 0, 0, 409, 410, 3, 89, 44, 0, 410, 92, 1, 0, 0, 0, 411, 414, 3,
+ 59, 29, 0, 412, 414, 5, 95, 0, 0, 413, 411, 1, 0, 0, 0, 413, 412, 1, 0,
+ 0, 0, 414, 420, 1, 0, 0, 0, 415, 419, 3, 59, 29, 0, 416, 419, 3, 61, 30,
+ 0, 417, 419, 5, 95, 0, 0, 418, 415, 1, 0, 0, 0, 418, 416, 1, 0, 0, 0, 418,
+ 417, 1, 0, 0, 0, 419, 422, 1, 0, 0, 0, 420, 418, 1, 0, 0, 0, 420, 421,
+ 1, 0, 0, 0, 421, 94, 1, 0, 0, 0, 422, 420, 1, 0, 0, 0, 36, 0, 176, 181,
+ 191, 224, 229, 239, 247, 253, 256, 261, 269, 272, 274, 279, 287, 289, 294,
+ 304, 308, 313, 315, 322, 324, 334, 336, 348, 350, 361, 371, 384, 399, 406,
+ 413, 418, 420, 1, 0, 1, 0,
+}
+ deserializer := antlr.NewATNDeserializer(nil)
+ staticData.atn = deserializer.Deserialize(staticData.serializedATN)
+ atn := staticData.atn
+ staticData.decisionToDFA = make([]*antlr.DFA, len(atn.DecisionToState))
+ decisionToDFA := staticData.decisionToDFA
+ for index, state := range atn.DecisionToState {
+ decisionToDFA[index] = antlr.NewDFA(state, index)
+ }
+}
+
+// CELLexerInit initializes any static state used to implement CELLexer. By default the
+// static state used to implement the lexer is lazily initialized during the first call to
+// NewCELLexer(). You can call this function if you wish to initialize the static state ahead
+// of time.
+func CELLexerInit() {
+ staticData := &CELLexerLexerStaticData
+ staticData.once.Do(cellexerLexerInit)
+}
+
+// NewCELLexer produces a new lexer instance for the optional input antlr.CharStream.
+func NewCELLexer(input antlr.CharStream) *CELLexer {
+ CELLexerInit()
+ l := new(CELLexer)
+ l.BaseLexer = antlr.NewBaseLexer(input)
+ staticData := &CELLexerLexerStaticData
+ l.Interpreter = antlr.NewLexerATNSimulator(l, staticData.atn, staticData.decisionToDFA, staticData.PredictionContextCache)
+ l.channelNames = staticData.ChannelNames
+ l.modeNames = staticData.ModeNames
+ l.RuleNames = staticData.RuleNames
+ l.LiteralNames = staticData.LiteralNames
+ l.SymbolicNames = staticData.SymbolicNames
+ l.GrammarFileName = "CEL.g4"
+ // TODO: l.EOF = antlr.TokenEOF
+
+ return l
+}
+
+// CELLexer tokens.
+const (
+ CELLexerEQUALS = 1
+ CELLexerNOT_EQUALS = 2
+ CELLexerIN = 3
+ CELLexerLESS = 4
+ CELLexerLESS_EQUALS = 5
+ CELLexerGREATER_EQUALS = 6
+ CELLexerGREATER = 7
+ CELLexerLOGICAL_AND = 8
+ CELLexerLOGICAL_OR = 9
+ CELLexerLBRACKET = 10
+ CELLexerRPRACKET = 11
+ CELLexerLBRACE = 12
+ CELLexerRBRACE = 13
+ CELLexerLPAREN = 14
+ CELLexerRPAREN = 15
+ CELLexerDOT = 16
+ CELLexerCOMMA = 17
+ CELLexerMINUS = 18
+ CELLexerEXCLAM = 19
+ CELLexerQUESTIONMARK = 20
+ CELLexerCOLON = 21
+ CELLexerPLUS = 22
+ CELLexerSTAR = 23
+ CELLexerSLASH = 24
+ CELLexerPERCENT = 25
+ CELLexerCEL_TRUE = 26
+ CELLexerCEL_FALSE = 27
+ CELLexerNUL = 28
+ CELLexerWHITESPACE = 29
+ CELLexerCOMMENT = 30
+ CELLexerNUM_FLOAT = 31
+ CELLexerNUM_INT = 32
+ CELLexerNUM_UINT = 33
+ CELLexerSTRING = 34
+ CELLexerBYTES = 35
+ CELLexerIDENTIFIER = 36
+)
+
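For orientation between these generated files: the lexer above is driven through the stock antlr4-go runtime, and its WHITESPACE and COMMENT rules carry skip actions in the serialized ATN, so those tokens never reach the caller. A minimal sketch of tokenizing a CEL expression with it, assuming only the NewCELLexer constructor defined above and the standard antlr CharStream/Token APIs (this illustration is editorial and not part of the vendored files):

package main

import (
	"fmt"

	"github.com/antlr4-go/antlr/v4"
	gen "github.com/google/cel-go/parser/gen"
)

func main() {
	// CharStream over a CEL expression; NewCELLexer lazily runs the
	// static-state initializer via CELLexerInit on first use.
	input := antlr.NewInputStream(`account.balance >= 100 && !account.frozen`)
	lexer := gen.NewCELLexer(input)

	// Drain tokens until EOF. WHITESPACE and COMMENT are skipped by
	// the lexer actions baked into the serialized ATN above.
	for tok := lexer.NextToken(); tok.GetTokenType() != antlr.TokenEOF; tok = lexer.NextToken() {
		fmt.Printf("%3d %q\n", tok.GetTokenType(), tok.GetText())
	}
}

The token-type integers printed here line up with the CELLexer token constants above (for example, LOGICAL_AND is 8).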
diff --git a/vendor/github.com/google/cel-go/parser/gen/cel_listener.go b/vendor/github.com/google/cel-go/parser/gen/cel_listener.go
new file mode 100644
index 000000000..22dc99789
--- /dev/null
+++ b/vendor/github.com/google/cel-go/parser/gen/cel_listener.go
@@ -0,0 +1,208 @@
+// Code generated from /usr/local/google/home/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.13.1. DO NOT EDIT.
+
+package gen // CEL
+import "github.com/antlr4-go/antlr/v4"
+
+
+// CELListener is a complete listener for a parse tree produced by CELParser.
+type CELListener interface {
+ antlr.ParseTreeListener
+
+ // EnterStart is called when entering the start production.
+ EnterStart(c *StartContext)
+
+ // EnterExpr is called when entering the expr production.
+ EnterExpr(c *ExprContext)
+
+ // EnterConditionalOr is called when entering the conditionalOr production.
+ EnterConditionalOr(c *ConditionalOrContext)
+
+ // EnterConditionalAnd is called when entering the conditionalAnd production.
+ EnterConditionalAnd(c *ConditionalAndContext)
+
+ // EnterRelation is called when entering the relation production.
+ EnterRelation(c *RelationContext)
+
+ // EnterCalc is called when entering the calc production.
+ EnterCalc(c *CalcContext)
+
+ // EnterMemberExpr is called when entering the MemberExpr production.
+ EnterMemberExpr(c *MemberExprContext)
+
+ // EnterLogicalNot is called when entering the LogicalNot production.
+ EnterLogicalNot(c *LogicalNotContext)
+
+ // EnterNegate is called when entering the Negate production.
+ EnterNegate(c *NegateContext)
+
+ // EnterMemberCall is called when entering the MemberCall production.
+ EnterMemberCall(c *MemberCallContext)
+
+ // EnterSelect is called when entering the Select production.
+ EnterSelect(c *SelectContext)
+
+ // EnterPrimaryExpr is called when entering the PrimaryExpr production.
+ EnterPrimaryExpr(c *PrimaryExprContext)
+
+ // EnterIndex is called when entering the Index production.
+ EnterIndex(c *IndexContext)
+
+ // EnterIdentOrGlobalCall is called when entering the IdentOrGlobalCall production.
+ EnterIdentOrGlobalCall(c *IdentOrGlobalCallContext)
+
+ // EnterNested is called when entering the Nested production.
+ EnterNested(c *NestedContext)
+
+ // EnterCreateList is called when entering the CreateList production.
+ EnterCreateList(c *CreateListContext)
+
+ // EnterCreateStruct is called when entering the CreateStruct production.
+ EnterCreateStruct(c *CreateStructContext)
+
+ // EnterCreateMessage is called when entering the CreateMessage production.
+ EnterCreateMessage(c *CreateMessageContext)
+
+ // EnterConstantLiteral is called when entering the ConstantLiteral production.
+ EnterConstantLiteral(c *ConstantLiteralContext)
+
+ // EnterExprList is called when entering the exprList production.
+ EnterExprList(c *ExprListContext)
+
+ // EnterListInit is called when entering the listInit production.
+ EnterListInit(c *ListInitContext)
+
+ // EnterFieldInitializerList is called when entering the fieldInitializerList production.
+ EnterFieldInitializerList(c *FieldInitializerListContext)
+
+ // EnterOptField is called when entering the optField production.
+ EnterOptField(c *OptFieldContext)
+
+ // EnterMapInitializerList is called when entering the mapInitializerList production.
+ EnterMapInitializerList(c *MapInitializerListContext)
+
+ // EnterOptExpr is called when entering the optExpr production.
+ EnterOptExpr(c *OptExprContext)
+
+ // EnterInt is called when entering the Int production.
+ EnterInt(c *IntContext)
+
+ // EnterUint is called when entering the Uint production.
+ EnterUint(c *UintContext)
+
+ // EnterDouble is called when entering the Double production.
+ EnterDouble(c *DoubleContext)
+
+ // EnterString is called when entering the String production.
+ EnterString(c *StringContext)
+
+ // EnterBytes is called when entering the Bytes production.
+ EnterBytes(c *BytesContext)
+
+ // EnterBoolTrue is called when entering the BoolTrue production.
+ EnterBoolTrue(c *BoolTrueContext)
+
+ // EnterBoolFalse is called when entering the BoolFalse production.
+ EnterBoolFalse(c *BoolFalseContext)
+
+ // EnterNull is called when entering the Null production.
+ EnterNull(c *NullContext)
+
+ // ExitStart is called when exiting the start production.
+ ExitStart(c *StartContext)
+
+ // ExitExpr is called when exiting the expr production.
+ ExitExpr(c *ExprContext)
+
+ // ExitConditionalOr is called when exiting the conditionalOr production.
+ ExitConditionalOr(c *ConditionalOrContext)
+
+ // ExitConditionalAnd is called when exiting the conditionalAnd production.
+ ExitConditionalAnd(c *ConditionalAndContext)
+
+ // ExitRelation is called when exiting the relation production.
+ ExitRelation(c *RelationContext)
+
+ // ExitCalc is called when exiting the calc production.
+ ExitCalc(c *CalcContext)
+
+ // ExitMemberExpr is called when exiting the MemberExpr production.
+ ExitMemberExpr(c *MemberExprContext)
+
+ // ExitLogicalNot is called when exiting the LogicalNot production.
+ ExitLogicalNot(c *LogicalNotContext)
+
+ // ExitNegate is called when exiting the Negate production.
+ ExitNegate(c *NegateContext)
+
+ // ExitMemberCall is called when exiting the MemberCall production.
+ ExitMemberCall(c *MemberCallContext)
+
+ // ExitSelect is called when exiting the Select production.
+ ExitSelect(c *SelectContext)
+
+ // ExitPrimaryExpr is called when exiting the PrimaryExpr production.
+ ExitPrimaryExpr(c *PrimaryExprContext)
+
+ // ExitIndex is called when exiting the Index production.
+ ExitIndex(c *IndexContext)
+
+ // ExitIdentOrGlobalCall is called when exiting the IdentOrGlobalCall production.
+ ExitIdentOrGlobalCall(c *IdentOrGlobalCallContext)
+
+ // ExitNested is called when exiting the Nested production.
+ ExitNested(c *NestedContext)
+
+ // ExitCreateList is called when exiting the CreateList production.
+ ExitCreateList(c *CreateListContext)
+
+ // ExitCreateStruct is called when exiting the CreateStruct production.
+ ExitCreateStruct(c *CreateStructContext)
+
+ // ExitCreateMessage is called when exiting the CreateMessage production.
+ ExitCreateMessage(c *CreateMessageContext)
+
+ // ExitConstantLiteral is called when exiting the ConstantLiteral production.
+ ExitConstantLiteral(c *ConstantLiteralContext)
+
+ // ExitExprList is called when exiting the exprList production.
+ ExitExprList(c *ExprListContext)
+
+ // ExitListInit is called when exiting the listInit production.
+ ExitListInit(c *ListInitContext)
+
+ // ExitFieldInitializerList is called when exiting the fieldInitializerList production.
+ ExitFieldInitializerList(c *FieldInitializerListContext)
+
+ // ExitOptField is called when exiting the optField production.
+ ExitOptField(c *OptFieldContext)
+
+ // ExitMapInitializerList is called when exiting the mapInitializerList production.
+ ExitMapInitializerList(c *MapInitializerListContext)
+
+ // ExitOptExpr is called when exiting the optExpr production.
+ ExitOptExpr(c *OptExprContext)
+
+ // ExitInt is called when exiting the Int production.
+ ExitInt(c *IntContext)
+
+ // ExitUint is called when exiting the Uint production.
+ ExitUint(c *UintContext)
+
+ // ExitDouble is called when exiting the Double production.
+ ExitDouble(c *DoubleContext)
+
+ // ExitString is called when exiting the String production.
+ ExitString(c *StringContext)
+
+ // ExitBytes is called when exiting the Bytes production.
+ ExitBytes(c *BytesContext)
+
+ // ExitBoolTrue is called when exiting the BoolTrue production.
+ ExitBoolTrue(c *BoolTrueContext)
+
+ // ExitBoolFalse is called when exiting the BoolFalse production.
+ ExitBoolFalse(c *BoolFalseContext)
+
+ // ExitNull is called when exiting the Null production.
+ ExitNull(c *NullContext)
+}
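CELListener is consumed by walking a parse tree produced by the parser in the next file. A minimal sketch of wiring the pieces together, assuming the no-op BaseCELListener that the ANTLR Go target generates alongside this interface (vendored elsewhere in this change) and the parser's Start_ entry rule from cel_parser.go below (again an editorial illustration, not part of the vendored files):

package main

import (
	"fmt"

	"github.com/antlr4-go/antlr/v4"
	gen "github.com/google/cel-go/parser/gen"
)

// identCollector overrides a single CELListener callback; the embedded
// BaseCELListener supplies no-op implementations for everything else.
type identCollector struct {
	gen.BaseCELListener
	idents []string
}

func (c *identCollector) EnterIdentOrGlobalCall(ctx *gen.IdentOrGlobalCallContext) {
	c.idents = append(c.idents, ctx.GetText())
}

func main() {
	input := antlr.NewInputStream("request.size <= limit ? 'ok' : 'too big'")
	lexer := gen.NewCELLexer(input)
	tokens := antlr.NewCommonTokenStream(lexer, antlr.TokenDefaultChannel)
	parser := gen.NewCELParser(tokens)

	// Start_ is the generated entry rule (expr EOF); walking its tree
	// fires the Enter*/Exit* callbacks depth-first as the walker
	// descends into and climbs back out of each production.
	tree := parser.Start_()
	collector := &identCollector{}
	antlr.ParseTreeWalkerDefault.Walk(collector, tree)
	fmt.Println(collector.idents)
}

Overriding only the callbacks of interest is the intended usage pattern for generated listeners; the visitor interface (CELVisitor, used by the Accept methods below) is the alternative when a return value per node is needed.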
diff --git a/vendor/github.com/google/cel-go/parser/gen/cel_parser.go b/vendor/github.com/google/cel-go/parser/gen/cel_parser.go
new file mode 100644
index 000000000..35334af61
--- /dev/null
+++ b/vendor/github.com/google/cel-go/parser/gen/cel_parser.go
@@ -0,0 +1,6274 @@
+// Code generated from /usr/local/google/home/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.13.1. DO NOT EDIT.
+
+package gen // CEL
+import (
+ "fmt"
+ "strconv"
+ "sync"
+
+ "github.com/antlr4-go/antlr/v4"
+)
+
+// Suppress unused import errors
+var _ = fmt.Printf
+var _ = strconv.Itoa
+var _ = sync.Once{}
+
+
+type CELParser struct {
+ *antlr.BaseParser
+}
+
+var CELParserStaticData struct {
+ once sync.Once
+ serializedATN []int32
+ LiteralNames []string
+ SymbolicNames []string
+ RuleNames []string
+ PredictionContextCache *antlr.PredictionContextCache
+ atn *antlr.ATN
+ decisionToDFA []*antlr.DFA
+}
+
+func celParserInit() {
+ staticData := &CELParserStaticData
+ staticData.LiteralNames = []string{
+ "", "'=='", "'!='", "'in'", "'<'", "'<='", "'>='", "'>'", "'&&'", "'||'",
+ "'['", "']'", "'{'", "'}'", "'('", "')'", "'.'", "','", "'-'", "'!'",
+ "'?'", "':'", "'+'", "'*'", "'/'", "'%'", "'true'", "'false'", "'null'",
+ }
+ staticData.SymbolicNames = []string{
+ "", "EQUALS", "NOT_EQUALS", "IN", "LESS", "LESS_EQUALS", "GREATER_EQUALS",
+ "GREATER", "LOGICAL_AND", "LOGICAL_OR", "LBRACKET", "RPRACKET", "LBRACE",
+ "RBRACE", "LPAREN", "RPAREN", "DOT", "COMMA", "MINUS", "EXCLAM", "QUESTIONMARK",
+ "COLON", "PLUS", "STAR", "SLASH", "PERCENT", "CEL_TRUE", "CEL_FALSE",
+ "NUL", "WHITESPACE", "COMMENT", "NUM_FLOAT", "NUM_INT", "NUM_UINT",
+ "STRING", "BYTES", "IDENTIFIER",
+ }
+ staticData.RuleNames = []string{
+ "start", "expr", "conditionalOr", "conditionalAnd", "relation", "calc",
+ "unary", "member", "primary", "exprList", "listInit", "fieldInitializerList",
+ "optField", "mapInitializerList", "optExpr", "literal",
+ }
+ staticData.PredictionContextCache = antlr.NewPredictionContextCache()
+ staticData.serializedATN = []int32{
+ 4, 1, 36, 251, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7,
+ 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7,
+ 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15,
+ 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 42, 8, 1, 1,
+ 2, 1, 2, 1, 2, 5, 2, 47, 8, 2, 10, 2, 12, 2, 50, 9, 2, 1, 3, 1, 3, 1, 3,
+ 5, 3, 55, 8, 3, 10, 3, 12, 3, 58, 9, 3, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1,
+ 4, 5, 4, 66, 8, 4, 10, 4, 12, 4, 69, 9, 4, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5,
+ 1, 5, 1, 5, 1, 5, 1, 5, 5, 5, 80, 8, 5, 10, 5, 12, 5, 83, 9, 5, 1, 6, 1,
+ 6, 4, 6, 87, 8, 6, 11, 6, 12, 6, 88, 1, 6, 1, 6, 4, 6, 93, 8, 6, 11, 6,
+ 12, 6, 94, 1, 6, 3, 6, 98, 8, 6, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3,
+ 7, 106, 8, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3, 7, 114, 8, 7, 1, 7,
+ 1, 7, 1, 7, 1, 7, 3, 7, 120, 8, 7, 1, 7, 1, 7, 1, 7, 5, 7, 125, 8, 7, 10,
+ 7, 12, 7, 128, 9, 7, 1, 8, 3, 8, 131, 8, 8, 1, 8, 1, 8, 1, 8, 3, 8, 136,
+ 8, 8, 1, 8, 3, 8, 139, 8, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 3, 8,
+ 147, 8, 8, 1, 8, 3, 8, 150, 8, 8, 1, 8, 1, 8, 1, 8, 3, 8, 155, 8, 8, 1,
+ 8, 3, 8, 158, 8, 8, 1, 8, 1, 8, 3, 8, 162, 8, 8, 1, 8, 1, 8, 1, 8, 5, 8,
+ 167, 8, 8, 10, 8, 12, 8, 170, 9, 8, 1, 8, 1, 8, 3, 8, 174, 8, 8, 1, 8,
+ 3, 8, 177, 8, 8, 1, 8, 1, 8, 3, 8, 181, 8, 8, 1, 9, 1, 9, 1, 9, 5, 9, 186,
+ 8, 9, 10, 9, 12, 9, 189, 9, 9, 1, 10, 1, 10, 1, 10, 5, 10, 194, 8, 10,
+ 10, 10, 12, 10, 197, 9, 10, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1,
+ 11, 1, 11, 5, 11, 207, 8, 11, 10, 11, 12, 11, 210, 9, 11, 1, 12, 3, 12,
+ 213, 8, 12, 1, 12, 1, 12, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1,
+ 13, 1, 13, 5, 13, 225, 8, 13, 10, 13, 12, 13, 228, 9, 13, 1, 14, 3, 14,
+ 231, 8, 14, 1, 14, 1, 14, 1, 15, 3, 15, 236, 8, 15, 1, 15, 1, 15, 1, 15,
+ 3, 15, 241, 8, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 3, 15, 249,
+ 8, 15, 1, 15, 0, 3, 8, 10, 14, 16, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20,
+ 22, 24, 26, 28, 30, 0, 3, 1, 0, 1, 7, 1, 0, 23, 25, 2, 0, 18, 18, 22, 22,
+ 281, 0, 32, 1, 0, 0, 0, 2, 35, 1, 0, 0, 0, 4, 43, 1, 0, 0, 0, 6, 51, 1,
+ 0, 0, 0, 8, 59, 1, 0, 0, 0, 10, 70, 1, 0, 0, 0, 12, 97, 1, 0, 0, 0, 14,
+ 99, 1, 0, 0, 0, 16, 180, 1, 0, 0, 0, 18, 182, 1, 0, 0, 0, 20, 190, 1, 0,
+ 0, 0, 22, 198, 1, 0, 0, 0, 24, 212, 1, 0, 0, 0, 26, 216, 1, 0, 0, 0, 28,
+ 230, 1, 0, 0, 0, 30, 248, 1, 0, 0, 0, 32, 33, 3, 2, 1, 0, 33, 34, 5, 0,
+ 0, 1, 34, 1, 1, 0, 0, 0, 35, 41, 3, 4, 2, 0, 36, 37, 5, 20, 0, 0, 37, 38,
+ 3, 4, 2, 0, 38, 39, 5, 21, 0, 0, 39, 40, 3, 2, 1, 0, 40, 42, 1, 0, 0, 0,
+ 41, 36, 1, 0, 0, 0, 41, 42, 1, 0, 0, 0, 42, 3, 1, 0, 0, 0, 43, 48, 3, 6,
+ 3, 0, 44, 45, 5, 9, 0, 0, 45, 47, 3, 6, 3, 0, 46, 44, 1, 0, 0, 0, 47, 50,
+ 1, 0, 0, 0, 48, 46, 1, 0, 0, 0, 48, 49, 1, 0, 0, 0, 49, 5, 1, 0, 0, 0,
+ 50, 48, 1, 0, 0, 0, 51, 56, 3, 8, 4, 0, 52, 53, 5, 8, 0, 0, 53, 55, 3,
+ 8, 4, 0, 54, 52, 1, 0, 0, 0, 55, 58, 1, 0, 0, 0, 56, 54, 1, 0, 0, 0, 56,
+ 57, 1, 0, 0, 0, 57, 7, 1, 0, 0, 0, 58, 56, 1, 0, 0, 0, 59, 60, 6, 4, -1,
+ 0, 60, 61, 3, 10, 5, 0, 61, 67, 1, 0, 0, 0, 62, 63, 10, 1, 0, 0, 63, 64,
+ 7, 0, 0, 0, 64, 66, 3, 8, 4, 2, 65, 62, 1, 0, 0, 0, 66, 69, 1, 0, 0, 0,
+ 67, 65, 1, 0, 0, 0, 67, 68, 1, 0, 0, 0, 68, 9, 1, 0, 0, 0, 69, 67, 1, 0,
+ 0, 0, 70, 71, 6, 5, -1, 0, 71, 72, 3, 12, 6, 0, 72, 81, 1, 0, 0, 0, 73,
+ 74, 10, 2, 0, 0, 74, 75, 7, 1, 0, 0, 75, 80, 3, 10, 5, 3, 76, 77, 10, 1,
+ 0, 0, 77, 78, 7, 2, 0, 0, 78, 80, 3, 10, 5, 2, 79, 73, 1, 0, 0, 0, 79,
+ 76, 1, 0, 0, 0, 80, 83, 1, 0, 0, 0, 81, 79, 1, 0, 0, 0, 81, 82, 1, 0, 0,
+ 0, 82, 11, 1, 0, 0, 0, 83, 81, 1, 0, 0, 0, 84, 98, 3, 14, 7, 0, 85, 87,
+ 5, 19, 0, 0, 86, 85, 1, 0, 0, 0, 87, 88, 1, 0, 0, 0, 88, 86, 1, 0, 0, 0,
+ 88, 89, 1, 0, 0, 0, 89, 90, 1, 0, 0, 0, 90, 98, 3, 14, 7, 0, 91, 93, 5,
+ 18, 0, 0, 92, 91, 1, 0, 0, 0, 93, 94, 1, 0, 0, 0, 94, 92, 1, 0, 0, 0, 94,
+ 95, 1, 0, 0, 0, 95, 96, 1, 0, 0, 0, 96, 98, 3, 14, 7, 0, 97, 84, 1, 0,
+ 0, 0, 97, 86, 1, 0, 0, 0, 97, 92, 1, 0, 0, 0, 98, 13, 1, 0, 0, 0, 99, 100,
+ 6, 7, -1, 0, 100, 101, 3, 16, 8, 0, 101, 126, 1, 0, 0, 0, 102, 103, 10,
+ 3, 0, 0, 103, 105, 5, 16, 0, 0, 104, 106, 5, 20, 0, 0, 105, 104, 1, 0,
+ 0, 0, 105, 106, 1, 0, 0, 0, 106, 107, 1, 0, 0, 0, 107, 125, 5, 36, 0, 0,
+ 108, 109, 10, 2, 0, 0, 109, 110, 5, 16, 0, 0, 110, 111, 5, 36, 0, 0, 111,
+ 113, 5, 14, 0, 0, 112, 114, 3, 18, 9, 0, 113, 112, 1, 0, 0, 0, 113, 114,
+ 1, 0, 0, 0, 114, 115, 1, 0, 0, 0, 115, 125, 5, 15, 0, 0, 116, 117, 10,
+ 1, 0, 0, 117, 119, 5, 10, 0, 0, 118, 120, 5, 20, 0, 0, 119, 118, 1, 0,
+ 0, 0, 119, 120, 1, 0, 0, 0, 120, 121, 1, 0, 0, 0, 121, 122, 3, 2, 1, 0,
+ 122, 123, 5, 11, 0, 0, 123, 125, 1, 0, 0, 0, 124, 102, 1, 0, 0, 0, 124,
+ 108, 1, 0, 0, 0, 124, 116, 1, 0, 0, 0, 125, 128, 1, 0, 0, 0, 126, 124,
+ 1, 0, 0, 0, 126, 127, 1, 0, 0, 0, 127, 15, 1, 0, 0, 0, 128, 126, 1, 0,
+ 0, 0, 129, 131, 5, 16, 0, 0, 130, 129, 1, 0, 0, 0, 130, 131, 1, 0, 0, 0,
+ 131, 132, 1, 0, 0, 0, 132, 138, 5, 36, 0, 0, 133, 135, 5, 14, 0, 0, 134,
+ 136, 3, 18, 9, 0, 135, 134, 1, 0, 0, 0, 135, 136, 1, 0, 0, 0, 136, 137,
+ 1, 0, 0, 0, 137, 139, 5, 15, 0, 0, 138, 133, 1, 0, 0, 0, 138, 139, 1, 0,
+ 0, 0, 139, 181, 1, 0, 0, 0, 140, 141, 5, 14, 0, 0, 141, 142, 3, 2, 1, 0,
+ 142, 143, 5, 15, 0, 0, 143, 181, 1, 0, 0, 0, 144, 146, 5, 10, 0, 0, 145,
+ 147, 3, 20, 10, 0, 146, 145, 1, 0, 0, 0, 146, 147, 1, 0, 0, 0, 147, 149,
+ 1, 0, 0, 0, 148, 150, 5, 17, 0, 0, 149, 148, 1, 0, 0, 0, 149, 150, 1, 0,
+ 0, 0, 150, 151, 1, 0, 0, 0, 151, 181, 5, 11, 0, 0, 152, 154, 5, 12, 0,
+ 0, 153, 155, 3, 26, 13, 0, 154, 153, 1, 0, 0, 0, 154, 155, 1, 0, 0, 0,
+ 155, 157, 1, 0, 0, 0, 156, 158, 5, 17, 0, 0, 157, 156, 1, 0, 0, 0, 157,
+ 158, 1, 0, 0, 0, 158, 159, 1, 0, 0, 0, 159, 181, 5, 13, 0, 0, 160, 162,
+ 5, 16, 0, 0, 161, 160, 1, 0, 0, 0, 161, 162, 1, 0, 0, 0, 162, 163, 1, 0,
+ 0, 0, 163, 168, 5, 36, 0, 0, 164, 165, 5, 16, 0, 0, 165, 167, 5, 36, 0,
+ 0, 166, 164, 1, 0, 0, 0, 167, 170, 1, 0, 0, 0, 168, 166, 1, 0, 0, 0, 168,
+ 169, 1, 0, 0, 0, 169, 171, 1, 0, 0, 0, 170, 168, 1, 0, 0, 0, 171, 173,
+ 5, 12, 0, 0, 172, 174, 3, 22, 11, 0, 173, 172, 1, 0, 0, 0, 173, 174, 1,
+ 0, 0, 0, 174, 176, 1, 0, 0, 0, 175, 177, 5, 17, 0, 0, 176, 175, 1, 0, 0,
+ 0, 176, 177, 1, 0, 0, 0, 177, 178, 1, 0, 0, 0, 178, 181, 5, 13, 0, 0, 179,
+ 181, 3, 30, 15, 0, 180, 130, 1, 0, 0, 0, 180, 140, 1, 0, 0, 0, 180, 144,
+ 1, 0, 0, 0, 180, 152, 1, 0, 0, 0, 180, 161, 1, 0, 0, 0, 180, 179, 1, 0,
+ 0, 0, 181, 17, 1, 0, 0, 0, 182, 187, 3, 2, 1, 0, 183, 184, 5, 17, 0, 0,
+ 184, 186, 3, 2, 1, 0, 185, 183, 1, 0, 0, 0, 186, 189, 1, 0, 0, 0, 187,
+ 185, 1, 0, 0, 0, 187, 188, 1, 0, 0, 0, 188, 19, 1, 0, 0, 0, 189, 187, 1,
+ 0, 0, 0, 190, 195, 3, 28, 14, 0, 191, 192, 5, 17, 0, 0, 192, 194, 3, 28,
+ 14, 0, 193, 191, 1, 0, 0, 0, 194, 197, 1, 0, 0, 0, 195, 193, 1, 0, 0, 0,
+ 195, 196, 1, 0, 0, 0, 196, 21, 1, 0, 0, 0, 197, 195, 1, 0, 0, 0, 198, 199,
+ 3, 24, 12, 0, 199, 200, 5, 21, 0, 0, 200, 208, 3, 2, 1, 0, 201, 202, 5,
+ 17, 0, 0, 202, 203, 3, 24, 12, 0, 203, 204, 5, 21, 0, 0, 204, 205, 3, 2,
+ 1, 0, 205, 207, 1, 0, 0, 0, 206, 201, 1, 0, 0, 0, 207, 210, 1, 0, 0, 0,
+ 208, 206, 1, 0, 0, 0, 208, 209, 1, 0, 0, 0, 209, 23, 1, 0, 0, 0, 210, 208,
+ 1, 0, 0, 0, 211, 213, 5, 20, 0, 0, 212, 211, 1, 0, 0, 0, 212, 213, 1, 0,
+ 0, 0, 213, 214, 1, 0, 0, 0, 214, 215, 5, 36, 0, 0, 215, 25, 1, 0, 0, 0,
+ 216, 217, 3, 28, 14, 0, 217, 218, 5, 21, 0, 0, 218, 226, 3, 2, 1, 0, 219,
+ 220, 5, 17, 0, 0, 220, 221, 3, 28, 14, 0, 221, 222, 5, 21, 0, 0, 222, 223,
+ 3, 2, 1, 0, 223, 225, 1, 0, 0, 0, 224, 219, 1, 0, 0, 0, 225, 228, 1, 0,
+ 0, 0, 226, 224, 1, 0, 0, 0, 226, 227, 1, 0, 0, 0, 227, 27, 1, 0, 0, 0,
+ 228, 226, 1, 0, 0, 0, 229, 231, 5, 20, 0, 0, 230, 229, 1, 0, 0, 0, 230,
+ 231, 1, 0, 0, 0, 231, 232, 1, 0, 0, 0, 232, 233, 3, 2, 1, 0, 233, 29, 1,
+ 0, 0, 0, 234, 236, 5, 18, 0, 0, 235, 234, 1, 0, 0, 0, 235, 236, 1, 0, 0,
+ 0, 236, 237, 1, 0, 0, 0, 237, 249, 5, 32, 0, 0, 238, 249, 5, 33, 0, 0,
+ 239, 241, 5, 18, 0, 0, 240, 239, 1, 0, 0, 0, 240, 241, 1, 0, 0, 0, 241,
+ 242, 1, 0, 0, 0, 242, 249, 5, 31, 0, 0, 243, 249, 5, 34, 0, 0, 244, 249,
+ 5, 35, 0, 0, 245, 249, 5, 26, 0, 0, 246, 249, 5, 27, 0, 0, 247, 249, 5,
+ 28, 0, 0, 248, 235, 1, 0, 0, 0, 248, 238, 1, 0, 0, 0, 248, 240, 1, 0, 0,
+ 0, 248, 243, 1, 0, 0, 0, 248, 244, 1, 0, 0, 0, 248, 245, 1, 0, 0, 0, 248,
+ 246, 1, 0, 0, 0, 248, 247, 1, 0, 0, 0, 249, 31, 1, 0, 0, 0, 35, 41, 48,
+ 56, 67, 79, 81, 88, 94, 97, 105, 113, 119, 124, 126, 130, 135, 138, 146,
+ 149, 154, 157, 161, 168, 173, 176, 180, 187, 195, 208, 212, 226, 230, 235,
+ 240, 248,
+}
+ deserializer := antlr.NewATNDeserializer(nil)
+ staticData.atn = deserializer.Deserialize(staticData.serializedATN)
+ atn := staticData.atn
+ staticData.decisionToDFA = make([]*antlr.DFA, len(atn.DecisionToState))
+ decisionToDFA := staticData.decisionToDFA
+ for index, state := range atn.DecisionToState {
+ decisionToDFA[index] = antlr.NewDFA(state, index)
+ }
+}
+
+// CELParserInit initializes any static state used to implement CELParser. By default the
+// static state used to implement the parser is lazily initialized during the first call to
+// NewCELParser(). You can call this function if you wish to initialize the static state ahead
+// of time.
+func CELParserInit() {
+ staticData := &CELParserStaticData
+ staticData.once.Do(celParserInit)
+}
+
+// NewCELParser produces a new parser instance for the optional input antlr.TokenStream.
+func NewCELParser(input antlr.TokenStream) *CELParser {
+ CELParserInit()
+ this := new(CELParser)
+ this.BaseParser = antlr.NewBaseParser(input)
+ staticData := &CELParserStaticData
+ this.Interpreter = antlr.NewParserATNSimulator(this, staticData.atn, staticData.decisionToDFA, staticData.PredictionContextCache)
+ this.RuleNames = staticData.RuleNames
+ this.LiteralNames = staticData.LiteralNames
+ this.SymbolicNames = staticData.SymbolicNames
+ this.GrammarFileName = "CEL.g4"
+
+ return this
+}
+
+
+// CELParser tokens.
+const (
+ CELParserEOF = antlr.TokenEOF
+ CELParserEQUALS = 1
+ CELParserNOT_EQUALS = 2
+ CELParserIN = 3
+ CELParserLESS = 4
+ CELParserLESS_EQUALS = 5
+ CELParserGREATER_EQUALS = 6
+ CELParserGREATER = 7
+ CELParserLOGICAL_AND = 8
+ CELParserLOGICAL_OR = 9
+ CELParserLBRACKET = 10
+ CELParserRPRACKET = 11
+ CELParserLBRACE = 12
+ CELParserRBRACE = 13
+ CELParserLPAREN = 14
+ CELParserRPAREN = 15
+ CELParserDOT = 16
+ CELParserCOMMA = 17
+ CELParserMINUS = 18
+ CELParserEXCLAM = 19
+ CELParserQUESTIONMARK = 20
+ CELParserCOLON = 21
+ CELParserPLUS = 22
+ CELParserSTAR = 23
+ CELParserSLASH = 24
+ CELParserPERCENT = 25
+ CELParserCEL_TRUE = 26
+ CELParserCEL_FALSE = 27
+ CELParserNUL = 28
+ CELParserWHITESPACE = 29
+ CELParserCOMMENT = 30
+ CELParserNUM_FLOAT = 31
+ CELParserNUM_INT = 32
+ CELParserNUM_UINT = 33
+ CELParserSTRING = 34
+ CELParserBYTES = 35
+ CELParserIDENTIFIER = 36
+)
+
+// CELParser rules.
+const (
+ CELParserRULE_start = 0
+ CELParserRULE_expr = 1
+ CELParserRULE_conditionalOr = 2
+ CELParserRULE_conditionalAnd = 3
+ CELParserRULE_relation = 4
+ CELParserRULE_calc = 5
+ CELParserRULE_unary = 6
+ CELParserRULE_member = 7
+ CELParserRULE_primary = 8
+ CELParserRULE_exprList = 9
+ CELParserRULE_listInit = 10
+ CELParserRULE_fieldInitializerList = 11
+ CELParserRULE_optField = 12
+ CELParserRULE_mapInitializerList = 13
+ CELParserRULE_optExpr = 14
+ CELParserRULE_literal = 15
+)
+
+// IStartContext is an interface to support dynamic dispatch.
+type IStartContext interface {
+ antlr.ParserRuleContext
+
+ // GetParser returns the parser.
+ GetParser() antlr.Parser
+
+ // GetE returns the e rule contexts.
+ GetE() IExprContext
+
+
+ // SetE sets the e rule contexts.
+ SetE(IExprContext)
+
+
+ // Getter signatures
+ EOF() antlr.TerminalNode
+ Expr() IExprContext
+
+ // IsStartContext differentiates from other interfaces.
+ IsStartContext()
+}
+
+type StartContext struct {
+ antlr.BaseParserRuleContext
+ parser antlr.Parser
+ e IExprContext
+}
+
+func NewEmptyStartContext() *StartContext {
+ var p = new(StartContext)
+ antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
+ p.RuleIndex = CELParserRULE_start
+ return p
+}
+
+func InitEmptyStartContext(p *StartContext) {
+ antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
+ p.RuleIndex = CELParserRULE_start
+}
+
+func (*StartContext) IsStartContext() {}
+
+func NewStartContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *StartContext {
+ var p = new(StartContext)
+
+ antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState)
+
+ p.parser = parser
+ p.RuleIndex = CELParserRULE_start
+
+ return p
+}
+
+func (s *StartContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *StartContext) GetE() IExprContext { return s.e }
+
+
+func (s *StartContext) SetE(v IExprContext) { s.e = v }
+
+
+func (s *StartContext) EOF() antlr.TerminalNode {
+ return s.GetToken(CELParserEOF, 0)
+}
+
+func (s *StartContext) Expr() IExprContext {
+ var t antlr.RuleContext;
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IExprContext); ok {
+ t = ctx.(antlr.RuleContext);
+ break
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IExprContext)
+}
+
+func (s *StartContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *StartContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+ return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+
+func (s *StartContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterStart(s)
+ }
+}
+
+func (s *StartContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitStart(s)
+ }
+}
+
+func (s *StartContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitStart(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+
+
+
+func (p *CELParser) Start_() (localctx IStartContext) {
+ localctx = NewStartContext(p, p.GetParserRuleContext(), p.GetState())
+ p.EnterRule(localctx, 0, CELParserRULE_start)
+ p.EnterOuterAlt(localctx, 1)
+ {
+ p.SetState(32)
+
+ var _x = p.Expr()
+
+
+ localctx.(*StartContext).e = _x
+ }
+ {
+ p.SetState(33)
+ p.Match(CELParserEOF)
+ if p.HasError() {
+ // Recognition error - abort rule
+ goto errorExit
+ }
+ }
+
+
+
+errorExit:
+ if p.HasError() {
+ v := p.GetError()
+ localctx.SetException(v)
+ p.GetErrorHandler().ReportError(p, v)
+ p.GetErrorHandler().Recover(p, v)
+ p.SetError(nil)
+ }
+ p.ExitRule()
+ return localctx
+ goto errorExit // Trick to prevent compiler error if the label is not used
+}
+
+
+// IExprContext is an interface to support dynamic dispatch.
+type IExprContext interface {
+ antlr.ParserRuleContext
+
+ // GetParser returns the parser.
+ GetParser() antlr.Parser
+
+ // GetOp returns the op token.
+ GetOp() antlr.Token
+
+
+ // SetOp sets the op token.
+ SetOp(antlr.Token)
+
+
+ // GetE returns the e rule contexts.
+ GetE() IConditionalOrContext
+
+ // GetE1 returns the e1 rule contexts.
+ GetE1() IConditionalOrContext
+
+ // GetE2 returns the e2 rule contexts.
+ GetE2() IExprContext
+
+
+ // SetE sets the e rule contexts.
+ SetE(IConditionalOrContext)
+
+ // SetE1 sets the e1 rule contexts.
+ SetE1(IConditionalOrContext)
+
+ // SetE2 sets the e2 rule contexts.
+ SetE2(IExprContext)
+
+
+ // Getter signatures
+ AllConditionalOr() []IConditionalOrContext
+ ConditionalOr(i int) IConditionalOrContext
+ COLON() antlr.TerminalNode
+ QUESTIONMARK() antlr.TerminalNode
+ Expr() IExprContext
+
+ // IsExprContext differentiates from other interfaces.
+ IsExprContext()
+}
+
+type ExprContext struct {
+ antlr.BaseParserRuleContext
+ parser antlr.Parser
+ e IConditionalOrContext
+ op antlr.Token
+ e1 IConditionalOrContext
+ e2 IExprContext
+}
+
+func NewEmptyExprContext() *ExprContext {
+ var p = new(ExprContext)
+ antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
+ p.RuleIndex = CELParserRULE_expr
+ return p
+}
+
+func InitEmptyExprContext(p *ExprContext) {
+ antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
+ p.RuleIndex = CELParserRULE_expr
+}
+
+func (*ExprContext) IsExprContext() {}
+
+func NewExprContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *ExprContext {
+ var p = new(ExprContext)
+
+ antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState)
+
+ p.parser = parser
+ p.RuleIndex = CELParserRULE_expr
+
+ return p
+}
+
+func (s *ExprContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *ExprContext) GetOp() antlr.Token { return s.op }
+
+
+func (s *ExprContext) SetOp(v antlr.Token) { s.op = v }
+
+
+func (s *ExprContext) GetE() IConditionalOrContext { return s.e }
+
+func (s *ExprContext) GetE1() IConditionalOrContext { return s.e1 }
+
+func (s *ExprContext) GetE2() IExprContext { return s.e2 }
+
+
+func (s *ExprContext) SetE(v IConditionalOrContext) { s.e = v }
+
+func (s *ExprContext) SetE1(v IConditionalOrContext) { s.e1 = v }
+
+func (s *ExprContext) SetE2(v IExprContext) { s.e2 = v }
+
+
+func (s *ExprContext) AllConditionalOr() []IConditionalOrContext {
+ children := s.GetChildren()
+ len := 0
+ for _, ctx := range children {
+ if _, ok := ctx.(IConditionalOrContext); ok {
+ len++
+ }
+ }
+
+ tst := make([]IConditionalOrContext, len)
+ i := 0
+ for _, ctx := range children {
+ if t, ok := ctx.(IConditionalOrContext); ok {
+ tst[i] = t.(IConditionalOrContext)
+ i++
+ }
+ }
+
+ return tst
+}
+
+func (s *ExprContext) ConditionalOr(i int) IConditionalOrContext {
+ var t antlr.RuleContext;
+ j := 0
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IConditionalOrContext); ok {
+ if j == i {
+ t = ctx.(antlr.RuleContext);
+ break
+ }
+ j++
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IConditionalOrContext)
+}
+
+func (s *ExprContext) COLON() antlr.TerminalNode {
+ return s.GetToken(CELParserCOLON, 0)
+}
+
+func (s *ExprContext) QUESTIONMARK() antlr.TerminalNode {
+ return s.GetToken(CELParserQUESTIONMARK, 0)
+}
+
+func (s *ExprContext) Expr() IExprContext {
+ var t antlr.RuleContext;
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IExprContext); ok {
+ t = ctx.(antlr.RuleContext);
+ break
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IExprContext)
+}
+
+func (s *ExprContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *ExprContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+ return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+
+func (s *ExprContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterExpr(s)
+ }
+}
+
+func (s *ExprContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitExpr(s)
+ }
+}
+
+func (s *ExprContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitExpr(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+
+
+
+func (p *CELParser) Expr() (localctx IExprContext) {
+ localctx = NewExprContext(p, p.GetParserRuleContext(), p.GetState())
+ p.EnterRule(localctx, 2, CELParserRULE_expr)
+ var _la int
+
+ p.EnterOuterAlt(localctx, 1)
+ {
+ p.SetState(35)
+
+ var _x = p.ConditionalOr()
+
+
+ localctx.(*ExprContext).e = _x
+ }
+ p.SetState(41)
+ p.GetErrorHandler().Sync(p)
+ if p.HasError() {
+ goto errorExit
+ }
+ _la = p.GetTokenStream().LA(1)
+
+
+ if _la == CELParserQUESTIONMARK {
+ {
+ p.SetState(36)
+
+ var _m = p.Match(CELParserQUESTIONMARK)
+
+ localctx.(*ExprContext).op = _m
+ if p.HasError() {
+ // Recognition error - abort rule
+ goto errorExit
+ }
+ }
+ {
+ p.SetState(37)
+
+ var _x = p.ConditionalOr()
+
+
+ localctx.(*ExprContext).e1 = _x
+ }
+ {
+ p.SetState(38)
+ p.Match(CELParserCOLON)
+ if p.HasError() {
+ // Recognition error - abort rule
+ goto errorExit
+ }
+ }
+ {
+ p.SetState(39)
+
+ var _x = p.Expr()
+
+
+ localctx.(*ExprContext).e2 = _x
+ }
+
+ }
+
+
+
+errorExit:
+ if p.HasError() {
+ v := p.GetError()
+ localctx.SetException(v)
+ p.GetErrorHandler().ReportError(p, v)
+ p.GetErrorHandler().Recover(p, v)
+ p.SetError(nil)
+ }
+ p.ExitRule()
+ return localctx
+ goto errorExit // Trick to prevent compiler error if the label is not used
+}
+
+
+// IConditionalOrContext is an interface to support dynamic dispatch.
+type IConditionalOrContext interface {
+ antlr.ParserRuleContext
+
+ // GetParser returns the parser.
+ GetParser() antlr.Parser
+
+ // GetS9 returns the s9 token.
+ GetS9() antlr.Token
+
+
+ // SetS9 sets the s9 token.
+ SetS9(antlr.Token)
+
+
+ // GetOps returns the ops token list.
+ GetOps() []antlr.Token
+
+
+ // SetOps sets the ops token list.
+ SetOps([]antlr.Token)
+
+
+ // GetE returns the e rule contexts.
+ GetE() IConditionalAndContext
+
+ // Get_conditionalAnd returns the _conditionalAnd rule contexts.
+ Get_conditionalAnd() IConditionalAndContext
+
+
+ // SetE sets the e rule contexts.
+ SetE(IConditionalAndContext)
+
+ // Set_conditionalAnd sets the _conditionalAnd rule contexts.
+ Set_conditionalAnd(IConditionalAndContext)
+
+
+ // GetE1 returns the e1 rule context list.
+ GetE1() []IConditionalAndContext
+
+
+ // SetE1 sets the e1 rule context list.
+ SetE1([]IConditionalAndContext)
+
+
+ // Getter signatures
+ AllConditionalAnd() []IConditionalAndContext
+ ConditionalAnd(i int) IConditionalAndContext
+ AllLOGICAL_OR() []antlr.TerminalNode
+ LOGICAL_OR(i int) antlr.TerminalNode
+
+ // IsConditionalOrContext differentiates from other interfaces.
+ IsConditionalOrContext()
+}
+
+type ConditionalOrContext struct {
+ antlr.BaseParserRuleContext
+ parser antlr.Parser
+ e IConditionalAndContext
+ s9 antlr.Token
+ ops []antlr.Token
+ _conditionalAnd IConditionalAndContext
+ e1 []IConditionalAndContext
+}
+
+func NewEmptyConditionalOrContext() *ConditionalOrContext {
+ var p = new(ConditionalOrContext)
+ antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
+ p.RuleIndex = CELParserRULE_conditionalOr
+ return p
+}
+
+func InitEmptyConditionalOrContext(p *ConditionalOrContext) {
+ antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
+ p.RuleIndex = CELParserRULE_conditionalOr
+}
+
+func (*ConditionalOrContext) IsConditionalOrContext() {}
+
+func NewConditionalOrContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *ConditionalOrContext {
+ var p = new(ConditionalOrContext)
+
+ antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState)
+
+ p.parser = parser
+ p.RuleIndex = CELParserRULE_conditionalOr
+
+ return p
+}
+
+func (s *ConditionalOrContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *ConditionalOrContext) GetS9() antlr.Token { return s.s9 }
+
+
+func (s *ConditionalOrContext) SetS9(v antlr.Token) { s.s9 = v }
+
+
+func (s *ConditionalOrContext) GetOps() []antlr.Token { return s.ops }
+
+
+func (s *ConditionalOrContext) SetOps(v []antlr.Token) { s.ops = v }
+
+
+func (s *ConditionalOrContext) GetE() IConditionalAndContext { return s.e }
+
+func (s *ConditionalOrContext) Get_conditionalAnd() IConditionalAndContext { return s._conditionalAnd }
+
+
+func (s *ConditionalOrContext) SetE(v IConditionalAndContext) { s.e = v }
+
+func (s *ConditionalOrContext) Set_conditionalAnd(v IConditionalAndContext) { s._conditionalAnd = v }
+
+
+func (s *ConditionalOrContext) GetE1() []IConditionalAndContext { return s.e1 }
+
+
+func (s *ConditionalOrContext) SetE1(v []IConditionalAndContext) { s.e1 = v }
+
+
+func (s *ConditionalOrContext) AllConditionalAnd() []IConditionalAndContext {
+ children := s.GetChildren()
+ len := 0
+ for _, ctx := range children {
+ if _, ok := ctx.(IConditionalAndContext); ok {
+ len++
+ }
+ }
+
+ tst := make([]IConditionalAndContext, len)
+ i := 0
+ for _, ctx := range children {
+ if t, ok := ctx.(IConditionalAndContext); ok {
+ tst[i] = t.(IConditionalAndContext)
+ i++
+ }
+ }
+
+ return tst
+}
+
+func (s *ConditionalOrContext) ConditionalAnd(i int) IConditionalAndContext {
+ var t antlr.RuleContext;
+ j := 0
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IConditionalAndContext); ok {
+ if j == i {
+ t = ctx.(antlr.RuleContext);
+ break
+ }
+ j++
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IConditionalAndContext)
+}
+
+func (s *ConditionalOrContext) AllLOGICAL_OR() []antlr.TerminalNode {
+ return s.GetTokens(CELParserLOGICAL_OR)
+}
+
+func (s *ConditionalOrContext) LOGICAL_OR(i int) antlr.TerminalNode {
+ return s.GetToken(CELParserLOGICAL_OR, i)
+}
+
+func (s *ConditionalOrContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *ConditionalOrContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+ return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+
+func (s *ConditionalOrContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterConditionalOr(s)
+ }
+}
+
+func (s *ConditionalOrContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitConditionalOr(s)
+ }
+}
+
+func (s *ConditionalOrContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitConditionalOr(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+
+
+
+func (p *CELParser) ConditionalOr() (localctx IConditionalOrContext) {
+ localctx = NewConditionalOrContext(p, p.GetParserRuleContext(), p.GetState())
+ p.EnterRule(localctx, 4, CELParserRULE_conditionalOr)
+ var _la int
+
+ p.EnterOuterAlt(localctx, 1)
+ {
+ p.SetState(43)
+
+ var _x = p.ConditionalAnd()
+
+
+ localctx.(*ConditionalOrContext).e = _x
+ }
+ p.SetState(48)
+ p.GetErrorHandler().Sync(p)
+ if p.HasError() {
+ goto errorExit
+ }
+ _la = p.GetTokenStream().LA(1)
+
+
+ for _la == CELParserLOGICAL_OR {
+ {
+ p.SetState(44)
+
+ var _m = p.Match(CELParserLOGICAL_OR)
+
+ localctx.(*ConditionalOrContext).s9 = _m
+ if p.HasError() {
+ // Recognition error - abort rule
+ goto errorExit
+ }
+ }
+ localctx.(*ConditionalOrContext).ops = append(localctx.(*ConditionalOrContext).ops, localctx.(*ConditionalOrContext).s9)
+ {
+ p.SetState(45)
+
+ var _x = p.ConditionalAnd()
+
+
+ localctx.(*ConditionalOrContext)._conditionalAnd = _x
+ }
+ localctx.(*ConditionalOrContext).e1 = append(localctx.(*ConditionalOrContext).e1, localctx.(*ConditionalOrContext)._conditionalAnd)
+
+
+ p.SetState(50)
+ p.GetErrorHandler().Sync(p)
+ if p.HasError() {
+ goto errorExit
+ }
+ _la = p.GetTokenStream().LA(1)
+ }
+
+
+
+errorExit:
+ if p.HasError() {
+ v := p.GetError()
+ localctx.SetException(v)
+ p.GetErrorHandler().ReportError(p, v)
+ p.GetErrorHandler().Recover(p, v)
+ p.SetError(nil)
+ }
+ p.ExitRule()
+ return localctx
+ goto errorExit // Trick to prevent compiler error if the label is not used
+}
+
+
+// IConditionalAndContext is an interface to support dynamic dispatch.
+type IConditionalAndContext interface {
+ antlr.ParserRuleContext
+
+ // GetParser returns the parser.
+ GetParser() antlr.Parser
+
+ // GetS8 returns the s8 token.
+ GetS8() antlr.Token
+
+
+ // SetS8 sets the s8 token.
+ SetS8(antlr.Token)
+
+
+ // GetOps returns the ops token list.
+ GetOps() []antlr.Token
+
+
+ // SetOps sets the ops token list.
+ SetOps([]antlr.Token)
+
+
+ // GetE returns the e rule contexts.
+ GetE() IRelationContext
+
+ // Get_relation returns the _relation rule contexts.
+ Get_relation() IRelationContext
+
+
+ // SetE sets the e rule contexts.
+ SetE(IRelationContext)
+
+ // Set_relation sets the _relation rule contexts.
+ Set_relation(IRelationContext)
+
+
+ // GetE1 returns the e1 rule context list.
+ GetE1() []IRelationContext
+
+
+ // SetE1 sets the e1 rule context list.
+ SetE1([]IRelationContext)
+
+
+ // Getter signatures
+ AllRelation() []IRelationContext
+ Relation(i int) IRelationContext
+ AllLOGICAL_AND() []antlr.TerminalNode
+ LOGICAL_AND(i int) antlr.TerminalNode
+
+ // IsConditionalAndContext differentiates from other interfaces.
+ IsConditionalAndContext()
+}
+
+type ConditionalAndContext struct {
+ antlr.BaseParserRuleContext
+ parser antlr.Parser
+ e IRelationContext
+ s8 antlr.Token
+ ops []antlr.Token
+ _relation IRelationContext
+ e1 []IRelationContext
+}
+
+func NewEmptyConditionalAndContext() *ConditionalAndContext {
+ var p = new(ConditionalAndContext)
+ antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
+ p.RuleIndex = CELParserRULE_conditionalAnd
+ return p
+}
+
+func InitEmptyConditionalAndContext(p *ConditionalAndContext) {
+ antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
+ p.RuleIndex = CELParserRULE_conditionalAnd
+}
+
+func (*ConditionalAndContext) IsConditionalAndContext() {}
+
+func NewConditionalAndContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *ConditionalAndContext {
+ var p = new(ConditionalAndContext)
+
+ antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState)
+
+ p.parser = parser
+ p.RuleIndex = CELParserRULE_conditionalAnd
+
+ return p
+}
+
+func (s *ConditionalAndContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *ConditionalAndContext) GetS8() antlr.Token { return s.s8 }
+
+
+func (s *ConditionalAndContext) SetS8(v antlr.Token) { s.s8 = v }
+
+
+func (s *ConditionalAndContext) GetOps() []antlr.Token { return s.ops }
+
+
+func (s *ConditionalAndContext) SetOps(v []antlr.Token) { s.ops = v }
+
+
+func (s *ConditionalAndContext) GetE() IRelationContext { return s.e }
+
+func (s *ConditionalAndContext) Get_relation() IRelationContext { return s._relation }
+
+
+func (s *ConditionalAndContext) SetE(v IRelationContext) { s.e = v }
+
+func (s *ConditionalAndContext) Set_relation(v IRelationContext) { s._relation = v }
+
+
+func (s *ConditionalAndContext) GetE1() []IRelationContext { return s.e1 }
+
+
+func (s *ConditionalAndContext) SetE1(v []IRelationContext) { s.e1 = v }
+
+
+func (s *ConditionalAndContext) AllRelation() []IRelationContext {
+ children := s.GetChildren()
+ len := 0
+ for _, ctx := range children {
+ if _, ok := ctx.(IRelationContext); ok {
+ len++
+ }
+ }
+
+ tst := make([]IRelationContext, len)
+ i := 0
+ for _, ctx := range children {
+ if t, ok := ctx.(IRelationContext); ok {
+ tst[i] = t.(IRelationContext)
+ i++
+ }
+ }
+
+ return tst
+}
+
+func (s *ConditionalAndContext) Relation(i int) IRelationContext {
+ var t antlr.RuleContext;
+ j := 0
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IRelationContext); ok {
+ if j == i {
+ t = ctx.(antlr.RuleContext);
+ break
+ }
+ j++
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IRelationContext)
+}
+
+func (s *ConditionalAndContext) AllLOGICAL_AND() []antlr.TerminalNode {
+ return s.GetTokens(CELParserLOGICAL_AND)
+}
+
+func (s *ConditionalAndContext) LOGICAL_AND(i int) antlr.TerminalNode {
+ return s.GetToken(CELParserLOGICAL_AND, i)
+}
+
+func (s *ConditionalAndContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *ConditionalAndContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+ return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+
+func (s *ConditionalAndContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterConditionalAnd(s)
+ }
+}
+
+func (s *ConditionalAndContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitConditionalAnd(s)
+ }
+}
+
+func (s *ConditionalAndContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitConditionalAnd(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+
+
+
+func (p *CELParser) ConditionalAnd() (localctx IConditionalAndContext) {
+ localctx = NewConditionalAndContext(p, p.GetParserRuleContext(), p.GetState())
+ p.EnterRule(localctx, 6, CELParserRULE_conditionalAnd)
+ var _la int
+
+ p.EnterOuterAlt(localctx, 1)
+ {
+ p.SetState(51)
+
+ var _x = p.relation(0)
+
+ localctx.(*ConditionalAndContext).e = _x
+ }
+ p.SetState(56)
+ p.GetErrorHandler().Sync(p)
+ if p.HasError() {
+ goto errorExit
+ }
+ _la = p.GetTokenStream().LA(1)
+
+
+ for _la == CELParserLOGICAL_AND {
+ {
+ p.SetState(52)
+
+ var _m = p.Match(CELParserLOGICAL_AND)
+
+ localctx.(*ConditionalAndContext).s8 = _m
+ if p.HasError() {
+ // Recognition error - abort rule
+ goto errorExit
+ }
+ }
+ localctx.(*ConditionalAndContext).ops = append(localctx.(*ConditionalAndContext).ops, localctx.(*ConditionalAndContext).s8)
+ {
+ p.SetState(53)
+
+ var _x = p.relation(0)
+
+ localctx.(*ConditionalAndContext)._relation = _x
+ }
+ localctx.(*ConditionalAndContext).e1 = append(localctx.(*ConditionalAndContext).e1, localctx.(*ConditionalAndContext)._relation)
+
+
+ p.SetState(58)
+ p.GetErrorHandler().Sync(p)
+ if p.HasError() {
+ goto errorExit
+ }
+ _la = p.GetTokenStream().LA(1)
+ }
+
+
+
+errorExit:
+ if p.HasError() {
+ v := p.GetError()
+ localctx.SetException(v)
+ p.GetErrorHandler().ReportError(p, v)
+ p.GetErrorHandler().Recover(p, v)
+ p.SetError(nil)
+ }
+ p.ExitRule()
+ return localctx
+ goto errorExit // Trick to prevent compiler error if the label is not used
+}
+
+
+// IRelationContext is an interface to support dynamic dispatch.
+type IRelationContext interface {
+ antlr.ParserRuleContext
+
+ // GetParser returns the parser.
+ GetParser() antlr.Parser
+
+ // GetOp returns the op token.
+ GetOp() antlr.Token
+
+
+ // SetOp sets the op token.
+ SetOp(antlr.Token)
+
+
+ // Getter signatures
+ Calc() ICalcContext
+ AllRelation() []IRelationContext
+ Relation(i int) IRelationContext
+ LESS() antlr.TerminalNode
+ LESS_EQUALS() antlr.TerminalNode
+ GREATER_EQUALS() antlr.TerminalNode
+ GREATER() antlr.TerminalNode
+ EQUALS() antlr.TerminalNode
+ NOT_EQUALS() antlr.TerminalNode
+ IN() antlr.TerminalNode
+
+ // IsRelationContext differentiates from other interfaces.
+ IsRelationContext()
+}
+
+type RelationContext struct {
+ antlr.BaseParserRuleContext
+ parser antlr.Parser
+ op antlr.Token
+}
+
+func NewEmptyRelationContext() *RelationContext {
+ var p = new(RelationContext)
+ antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
+ p.RuleIndex = CELParserRULE_relation
+ return p
+}
+
+func InitEmptyRelationContext(p *RelationContext) {
+ antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
+ p.RuleIndex = CELParserRULE_relation
+}
+
+func (*RelationContext) IsRelationContext() {}
+
+func NewRelationContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *RelationContext {
+ var p = new(RelationContext)
+
+ antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState)
+
+ p.parser = parser
+ p.RuleIndex = CELParserRULE_relation
+
+ return p
+}
+
+func (s *RelationContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *RelationContext) GetOp() antlr.Token { return s.op }
+
+
+func (s *RelationContext) SetOp(v antlr.Token) { s.op = v }
+
+
+func (s *RelationContext) Calc() ICalcContext {
+ var t antlr.RuleContext;
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(ICalcContext); ok {
+ t = ctx.(antlr.RuleContext);
+ break
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(ICalcContext)
+}
+
+func (s *RelationContext) AllRelation() []IRelationContext {
+ children := s.GetChildren()
+ len := 0
+ for _, ctx := range children {
+ if _, ok := ctx.(IRelationContext); ok {
+ len++
+ }
+ }
+
+ tst := make([]IRelationContext, len)
+ i := 0
+ for _, ctx := range children {
+ if t, ok := ctx.(IRelationContext); ok {
+ tst[i] = t.(IRelationContext)
+ i++
+ }
+ }
+
+ return tst
+}
+
+func (s *RelationContext) Relation(i int) IRelationContext {
+ var t antlr.RuleContext;
+ j := 0
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IRelationContext); ok {
+ if j == i {
+ t = ctx.(antlr.RuleContext);
+ break
+ }
+ j++
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IRelationContext)
+}
+
+func (s *RelationContext) LESS() antlr.TerminalNode {
+ return s.GetToken(CELParserLESS, 0)
+}
+
+func (s *RelationContext) LESS_EQUALS() antlr.TerminalNode {
+ return s.GetToken(CELParserLESS_EQUALS, 0)
+}
+
+func (s *RelationContext) GREATER_EQUALS() antlr.TerminalNode {
+ return s.GetToken(CELParserGREATER_EQUALS, 0)
+}
+
+func (s *RelationContext) GREATER() antlr.TerminalNode {
+ return s.GetToken(CELParserGREATER, 0)
+}
+
+func (s *RelationContext) EQUALS() antlr.TerminalNode {
+ return s.GetToken(CELParserEQUALS, 0)
+}
+
+func (s *RelationContext) NOT_EQUALS() antlr.TerminalNode {
+ return s.GetToken(CELParserNOT_EQUALS, 0)
+}
+
+func (s *RelationContext) IN() antlr.TerminalNode {
+ return s.GetToken(CELParserIN, 0)
+}
+
+func (s *RelationContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *RelationContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+ return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+
+func (s *RelationContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterRelation(s)
+ }
+}
+
+func (s *RelationContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitRelation(s)
+ }
+}
+
+func (s *RelationContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitRelation(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+
+
+
+
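+// relation parses CEL's comparison layer. The rule is left-recursive: it
+// first parses a calc operand, then, while adaptive prediction selects the
+// recursive alternative, wraps the previous context in a fresh
+// RelationContext and consumes one of <, <=, >=, >, ==, != or 'in' followed
+// by another relation operand.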
+func (p *CELParser) Relation() (localctx IRelationContext) {
+ return p.relation(0)
+}
+
+func (p *CELParser) relation(_p int) (localctx IRelationContext) {
+ var _parentctx antlr.ParserRuleContext = p.GetParserRuleContext()
+
+ _parentState := p.GetState()
+ localctx = NewRelationContext(p, p.GetParserRuleContext(), _parentState)
+ var _prevctx IRelationContext = localctx
+ var _ antlr.ParserRuleContext = _prevctx // TODO: To prevent unused variable warning.
+ _startState := 8
+ p.EnterRecursionRule(localctx, 8, CELParserRULE_relation, _p)
+ var _la int
+
+ var _alt int
+
+ p.EnterOuterAlt(localctx, 1)
+ {
+ p.SetState(60)
+ p.calc(0)
+ }
+
+ p.GetParserRuleContext().SetStop(p.GetTokenStream().LT(-1))
+ p.SetState(67)
+ p.GetErrorHandler().Sync(p)
+ if p.HasError() {
+ goto errorExit
+ }
+ _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 3, p.GetParserRuleContext())
+ if p.HasError() {
+ goto errorExit
+ }
+ for _alt != 2 && _alt != antlr.ATNInvalidAltNumber {
+ if _alt == 1 {
+ if p.GetParseListeners() != nil {
+ p.TriggerExitRuleEvent()
+ }
+ _prevctx = localctx
+ localctx = NewRelationContext(p, _parentctx, _parentState)
+ p.PushNewRecursionContext(localctx, _startState, CELParserRULE_relation)
+ p.SetState(62)
+
+ if !(p.Precpred(p.GetParserRuleContext(), 1)) {
+ p.SetError(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 1)", ""))
+ goto errorExit
+ }
+ {
+ p.SetState(63)
+
+ var _lt = p.GetTokenStream().LT(1)
+
+ localctx.(*RelationContext).op = _lt
+
+ _la = p.GetTokenStream().LA(1)
+
+ if !(((int64(_la) & ^0x3f) == 0 && ((int64(1) << _la) & 254) != 0)) {
+ var _ri = p.GetErrorHandler().RecoverInline(p)
+
+ localctx.(*RelationContext).op = _ri
+ } else {
+ p.GetErrorHandler().ReportMatch(p)
+ p.Consume()
+ }
+ }
+ {
+ p.SetState(64)
+ p.relation(2)
+ }
+
+
+ }
+ p.SetState(69)
+ p.GetErrorHandler().Sync(p)
+ if p.HasError() {
+ goto errorExit
+ }
+ _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 3, p.GetParserRuleContext())
+ if p.HasError() {
+ goto errorExit
+ }
+ }
+
+
+
+ errorExit:
+ if p.HasError() {
+ v := p.GetError()
+ localctx.SetException(v)
+ p.GetErrorHandler().ReportError(p, v)
+ p.GetErrorHandler().Recover(p, v)
+ p.SetError(nil)
+ }
+ p.UnrollRecursionContexts(_parentctx)
+ return localctx
+ goto errorExit // Trick to prevent compiler error if the label is not used
+}
+
+
+// ICalcContext is an interface to support dynamic dispatch.
+type ICalcContext interface {
+ antlr.ParserRuleContext
+
+ // GetParser returns the parser.
+ GetParser() antlr.Parser
+
+ // GetOp returns the op token.
+ GetOp() antlr.Token
+
+
+ // SetOp sets the op token.
+ SetOp(antlr.Token)
+
+
+ // Getter signatures
+ Unary() IUnaryContext
+ AllCalc() []ICalcContext
+ Calc(i int) ICalcContext
+ STAR() antlr.TerminalNode
+ SLASH() antlr.TerminalNode
+ PERCENT() antlr.TerminalNode
+ PLUS() antlr.TerminalNode
+ MINUS() antlr.TerminalNode
+
+ // IsCalcContext differentiates from other interfaces.
+ IsCalcContext()
+}
+
+type CalcContext struct {
+ antlr.BaseParserRuleContext
+ parser antlr.Parser
+ op antlr.Token
+}
+
+func NewEmptyCalcContext() *CalcContext {
+ var p = new(CalcContext)
+ antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
+ p.RuleIndex = CELParserRULE_calc
+ return p
+}
+
+func InitEmptyCalcContext(p *CalcContext) {
+ antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
+ p.RuleIndex = CELParserRULE_calc
+}
+
+func (*CalcContext) IsCalcContext() {}
+
+func NewCalcContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *CalcContext {
+ var p = new(CalcContext)
+
+ antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState)
+
+ p.parser = parser
+ p.RuleIndex = CELParserRULE_calc
+
+ return p
+}
+
+func (s *CalcContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *CalcContext) GetOp() antlr.Token { return s.op }
+
+
+func (s *CalcContext) SetOp(v antlr.Token) { s.op = v }
+
+
+func (s *CalcContext) Unary() IUnaryContext {
+ var t antlr.RuleContext;
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IUnaryContext); ok {
+ t = ctx.(antlr.RuleContext);
+ break
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IUnaryContext)
+}
+
+func (s *CalcContext) AllCalc() []ICalcContext {
+ children := s.GetChildren()
+ len := 0
+ for _, ctx := range children {
+ if _, ok := ctx.(ICalcContext); ok {
+ len++
+ }
+ }
+
+ tst := make([]ICalcContext, len)
+ i := 0
+ for _, ctx := range children {
+ if t, ok := ctx.(ICalcContext); ok {
+ tst[i] = t.(ICalcContext)
+ i++
+ }
+ }
+
+ return tst
+}
+
+func (s *CalcContext) Calc(i int) ICalcContext {
+ var t antlr.RuleContext;
+ j := 0
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(ICalcContext); ok {
+ if j == i {
+ t = ctx.(antlr.RuleContext);
+ break
+ }
+ j++
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(ICalcContext)
+}
+
+func (s *CalcContext) STAR() antlr.TerminalNode {
+ return s.GetToken(CELParserSTAR, 0)
+}
+
+func (s *CalcContext) SLASH() antlr.TerminalNode {
+ return s.GetToken(CELParserSLASH, 0)
+}
+
+func (s *CalcContext) PERCENT() antlr.TerminalNode {
+ return s.GetToken(CELParserPERCENT, 0)
+}
+
+func (s *CalcContext) PLUS() antlr.TerminalNode {
+ return s.GetToken(CELParserPLUS, 0)
+}
+
+func (s *CalcContext) MINUS() antlr.TerminalNode {
+ return s.GetToken(CELParserMINUS, 0)
+}
+
+func (s *CalcContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *CalcContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+ return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+
+func (s *CalcContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterCalc(s)
+ }
+}
+
+func (s *CalcContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitCalc(s)
+ }
+}
+
+func (s *CalcContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitCalc(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+
+
+
+
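+// calc parses CEL arithmetic. The two precedence predicates make *, / and %
+// (Precpred 2, recursing with calc(3)) bind tighter than + and -
+// (Precpred 1, recursing with calc(2)), so e.g. 1 + 2 * 3 groups as
+// 1 + (2 * 3).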
+func (p *CELParser) Calc() (localctx ICalcContext) {
+ return p.calc(0)
+}
+
+func (p *CELParser) calc(_p int) (localctx ICalcContext) {
+ var _parentctx antlr.ParserRuleContext = p.GetParserRuleContext()
+
+ _parentState := p.GetState()
+ localctx = NewCalcContext(p, p.GetParserRuleContext(), _parentState)
+ var _prevctx ICalcContext = localctx
+ var _ antlr.ParserRuleContext = _prevctx // TODO: To prevent unused variable warning.
+ _startState := 10
+ p.EnterRecursionRule(localctx, 10, CELParserRULE_calc, _p)
+ var _la int
+
+ var _alt int
+
+ p.EnterOuterAlt(localctx, 1)
+ {
+ p.SetState(71)
+ p.Unary()
+ }
+
+ p.GetParserRuleContext().SetStop(p.GetTokenStream().LT(-1))
+ p.SetState(81)
+ p.GetErrorHandler().Sync(p)
+ if p.HasError() {
+ goto errorExit
+ }
+ _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 5, p.GetParserRuleContext())
+ if p.HasError() {
+ goto errorExit
+ }
+ for _alt != 2 && _alt != antlr.ATNInvalidAltNumber {
+ if _alt == 1 {
+ if p.GetParseListeners() != nil {
+ p.TriggerExitRuleEvent()
+ }
+ _prevctx = localctx
+ p.SetState(79)
+ p.GetErrorHandler().Sync(p)
+ if p.HasError() {
+ goto errorExit
+ }
+
+ switch p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 4, p.GetParserRuleContext()) {
+ case 1:
+ localctx = NewCalcContext(p, _parentctx, _parentState)
+ p.PushNewRecursionContext(localctx, _startState, CELParserRULE_calc)
+ p.SetState(73)
+
+ if !(p.Precpred(p.GetParserRuleContext(), 2)) {
+ p.SetError(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 2)", ""))
+ goto errorExit
+ }
+ {
+ p.SetState(74)
+
+ var _lt = p.GetTokenStream().LT(1)
+
+ localctx.(*CalcContext).op = _lt
+
+ _la = p.GetTokenStream().LA(1)
+
+ if !(((int64(_la) & ^0x3f) == 0 && ((int64(1) << _la) & 58720256) != 0)) {
+ var _ri = p.GetErrorHandler().RecoverInline(p)
+
+ localctx.(*CalcContext).op = _ri
+ } else {
+ p.GetErrorHandler().ReportMatch(p)
+ p.Consume()
+ }
+ }
+ {
+ p.SetState(75)
+ p.calc(3)
+ }
+
+
+ case 2:
+ localctx = NewCalcContext(p, _parentctx, _parentState)
+ p.PushNewRecursionContext(localctx, _startState, CELParserRULE_calc)
+ p.SetState(76)
+
+ if !(p.Precpred(p.GetParserRuleContext(), 1)) {
+ p.SetError(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 1)", ""))
+ goto errorExit
+ }
+ {
+ p.SetState(77)
+
+ var _lt = p.GetTokenStream().LT(1)
+
+ localctx.(*CalcContext).op = _lt
+
+ _la = p.GetTokenStream().LA(1)
+
+ if !(_la == CELParserMINUS || _la == CELParserPLUS) {
+ var _ri = p.GetErrorHandler().RecoverInline(p)
+
+ localctx.(*CalcContext).op = _ri
+ } else {
+ p.GetErrorHandler().ReportMatch(p)
+ p.Consume()
+ }
+ }
+ {
+ p.SetState(78)
+ p.calc(2)
+ }
+
+ case antlr.ATNInvalidAltNumber:
+ goto errorExit
+ }
+
+ }
+ p.SetState(83)
+ p.GetErrorHandler().Sync(p)
+ if p.HasError() {
+ goto errorExit
+ }
+ _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 5, p.GetParserRuleContext())
+ if p.HasError() {
+ goto errorExit
+ }
+ }
+
+
+
+ errorExit:
+ if p.HasError() {
+ v := p.GetError()
+ localctx.SetException(v)
+ p.GetErrorHandler().ReportError(p, v)
+ p.GetErrorHandler().Recover(p, v)
+ p.SetError(nil)
+ }
+ p.UnrollRecursionContexts(_parentctx)
+ return localctx
+ goto errorExit // Trick to prevent compiler error if the label is not used
+}
+
+
+// IUnaryContext is an interface to support dynamic dispatch.
+type IUnaryContext interface {
+ antlr.ParserRuleContext
+
+ // GetParser returns the parser.
+ GetParser() antlr.Parser
+ // IsUnaryContext differentiates from other interfaces.
+ IsUnaryContext()
+}
+
+type UnaryContext struct {
+ antlr.BaseParserRuleContext
+ parser antlr.Parser
+}
+
+func NewEmptyUnaryContext() *UnaryContext {
+ var p = new(UnaryContext)
+ antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
+ p.RuleIndex = CELParserRULE_unary
+ return p
+}
+
+func InitEmptyUnaryContext(p *UnaryContext) {
+ antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
+ p.RuleIndex = CELParserRULE_unary
+}
+
+func (*UnaryContext) IsUnaryContext() {}
+
+func NewUnaryContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *UnaryContext {
+ var p = new(UnaryContext)
+
+ antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState)
+
+ p.parser = parser
+ p.RuleIndex = CELParserRULE_unary
+
+ return p
+}
+
+func (s *UnaryContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *UnaryContext) CopyAll(ctx *UnaryContext) {
+ s.CopyFrom(&ctx.BaseParserRuleContext)
+}
+
+func (s *UnaryContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *UnaryContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+ return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+
+
+
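+// LogicalNotContext is the '!'-prefixed alternative of unary. Each matched
+// EXCLAM token is staged in s19 and appended to ops, so listeners and
+// visitors can recover how many negations were applied.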
+type LogicalNotContext struct {
+ UnaryContext
+ s19 antlr.Token
+ ops []antlr.Token
+}
+
+func NewLogicalNotContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *LogicalNotContext {
+ var p = new(LogicalNotContext)
+
+ InitEmptyUnaryContext(&p.UnaryContext)
+ p.parser = parser
+ p.CopyAll(ctx.(*UnaryContext))
+
+ return p
+}
+
+
+func (s *LogicalNotContext) GetS19() antlr.Token { return s.s19 }
+
+
+func (s *LogicalNotContext) SetS19(v antlr.Token) { s.s19 = v }
+
+
+func (s *LogicalNotContext) GetOps() []antlr.Token { return s.ops }
+
+
+func (s *LogicalNotContext) SetOps(v []antlr.Token) { s.ops = v }
+
+func (s *LogicalNotContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *LogicalNotContext) Member() IMemberContext {
+ var t antlr.RuleContext;
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IMemberContext); ok {
+ t = ctx.(antlr.RuleContext);
+ break
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IMemberContext)
+}
+
+func (s *LogicalNotContext) AllEXCLAM() []antlr.TerminalNode {
+ return s.GetTokens(CELParserEXCLAM)
+}
+
+func (s *LogicalNotContext) EXCLAM(i int) antlr.TerminalNode {
+ return s.GetToken(CELParserEXCLAM, i)
+}
+
+
+func (s *LogicalNotContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterLogicalNot(s)
+ }
+}
+
+func (s *LogicalNotContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitLogicalNot(s)
+ }
+}
+
+func (s *LogicalNotContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitLogicalNot(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+
+type MemberExprContext struct {
+ UnaryContext
+}
+
+func NewMemberExprContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *MemberExprContext {
+ var p = new(MemberExprContext)
+
+ InitEmptyUnaryContext(&p.UnaryContext)
+ p.parser = parser
+ p.CopyAll(ctx.(*UnaryContext))
+
+ return p
+}
+
+func (s *MemberExprContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *MemberExprContext) Member() IMemberContext {
+ var t antlr.RuleContext;
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IMemberContext); ok {
+ t = ctx.(antlr.RuleContext);
+ break
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IMemberContext)
+}
+
+
+func (s *MemberExprContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterMemberExpr(s)
+ }
+}
+
+func (s *MemberExprContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitMemberExpr(s)
+ }
+}
+
+func (s *MemberExprContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitMemberExpr(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+
+type NegateContext struct {
+ UnaryContext
+ s18 antlr.Token
+ ops []antlr.Token
+}
+
+func NewNegateContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *NegateContext {
+ var p = new(NegateContext)
+
+ InitEmptyUnaryContext(&p.UnaryContext)
+ p.parser = parser
+ p.CopyAll(ctx.(*UnaryContext))
+
+ return p
+}
+
+
+func (s *NegateContext) GetS18() antlr.Token { return s.s18 }
+
+
+func (s *NegateContext) SetS18(v antlr.Token) { s.s18 = v }
+
+
+func (s *NegateContext) GetOps() []antlr.Token { return s.ops }
+
+
+func (s *NegateContext) SetOps(v []antlr.Token) { s.ops = v }
+
+func (s *NegateContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *NegateContext) Member() IMemberContext {
+ var t antlr.RuleContext;
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IMemberContext); ok {
+ t = ctx.(antlr.RuleContext);
+ break
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IMemberContext)
+}
+
+func (s *NegateContext) AllMINUS() []antlr.TerminalNode {
+ return s.GetTokens(CELParserMINUS)
+}
+
+func (s *NegateContext) MINUS(i int) antlr.TerminalNode {
+ return s.GetToken(CELParserMINUS, i)
+}
+
+
+func (s *NegateContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterNegate(s)
+ }
+}
+
+func (s *NegateContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitNegate(s)
+ }
+}
+
+func (s *NegateContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitNegate(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+
+
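+// Unary dispatches between the three labeled alternatives of the unary rule:
+// MemberExpr (a bare member), LogicalNot (one or more '!' prefixes) and
+// Negate (one or more '-' prefixes), each ending in a member, e.g. !!x
+// or --x.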
+func (p *CELParser) Unary() (localctx IUnaryContext) {
+ localctx = NewUnaryContext(p, p.GetParserRuleContext(), p.GetState())
+ p.EnterRule(localctx, 12, CELParserRULE_unary)
+ var _la int
+
+ var _alt int
+
+ p.SetState(97)
+ p.GetErrorHandler().Sync(p)
+ if p.HasError() {
+ goto errorExit
+ }
+
+ switch p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 8, p.GetParserRuleContext()) {
+ case 1:
+ localctx = NewMemberExprContext(p, localctx)
+ p.EnterOuterAlt(localctx, 1)
+ {
+ p.SetState(84)
+ p.member(0)
+ }
+
+
+ case 2:
+ localctx = NewLogicalNotContext(p, localctx)
+ p.EnterOuterAlt(localctx, 2)
+ p.SetState(86)
+ p.GetErrorHandler().Sync(p)
+ if p.HasError() {
+ goto errorExit
+ }
+ _la = p.GetTokenStream().LA(1)
+
+
+ for ok := true; ok; ok = _la == CELParserEXCLAM {
+ {
+ p.SetState(85)
+
+ var _m = p.Match(CELParserEXCLAM)
+
+ localctx.(*LogicalNotContext).s19 = _m
+ if p.HasError() {
+ // Recognition error - abort rule
+ goto errorExit
+ }
+ }
+ localctx.(*LogicalNotContext).ops = append(localctx.(*LogicalNotContext).ops, localctx.(*LogicalNotContext).s19)
+
+
+ p.SetState(88)
+ p.GetErrorHandler().Sync(p)
+ if p.HasError() {
+ goto errorExit
+ }
+ _la = p.GetTokenStream().LA(1)
+ }
+ {
+ p.SetState(90)
+ p.member(0)
+ }
+
+
+ case 3:
+ localctx = NewNegateContext(p, localctx)
+ p.EnterOuterAlt(localctx, 3)
+ p.SetState(92)
+ p.GetErrorHandler().Sync(p)
+ if p.HasError() {
+ goto errorExit
+ }
+ _alt = 1
+ for ok := true; ok; ok = _alt != 2 && _alt != antlr.ATNInvalidAltNumber {
+ switch _alt {
+ case 1:
+ {
+ p.SetState(91)
+
+ var _m = p.Match(CELParserMINUS)
+
+ localctx.(*NegateContext).s18 = _m
+ if p.HasError() {
+ // Recognition error - abort rule
+ goto errorExit
+ }
+ }
+ localctx.(*NegateContext).ops = append(localctx.(*NegateContext).ops, localctx.(*NegateContext).s18)
+
+
+
+
+ default:
+ p.SetError(antlr.NewNoViableAltException(p, nil, nil, nil, nil, nil))
+ goto errorExit
+ }
+
+ p.SetState(94)
+ p.GetErrorHandler().Sync(p)
+ _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 7, p.GetParserRuleContext())
+ if p.HasError() {
+ goto errorExit
+ }
+ }
+ {
+ p.SetState(96)
+ p.member(0)
+ }
+
+ case antlr.ATNInvalidAltNumber:
+ goto errorExit
+ }
+
+
+errorExit:
+ if p.HasError() {
+ v := p.GetError()
+ localctx.SetException(v)
+ p.GetErrorHandler().ReportError(p, v)
+ p.GetErrorHandler().Recover(p, v)
+ p.SetError(nil)
+ }
+ p.ExitRule()
+ return localctx
+ goto errorExit // Trick to prevent compiler error if the label is not used
+}
+
+
+// IMemberContext is an interface to support dynamic dispatch.
+type IMemberContext interface {
+ antlr.ParserRuleContext
+
+ // GetParser returns the parser.
+ GetParser() antlr.Parser
+ // IsMemberContext differentiates from other interfaces.
+ IsMemberContext()
+}
+
+type MemberContext struct {
+ antlr.BaseParserRuleContext
+ parser antlr.Parser
+}
+
+func NewEmptyMemberContext() *MemberContext {
+ var p = new(MemberContext)
+ antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
+ p.RuleIndex = CELParserRULE_member
+ return p
+}
+
+func InitEmptyMemberContext(p *MemberContext) {
+ antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
+ p.RuleIndex = CELParserRULE_member
+}
+
+func (*MemberContext) IsMemberContext() {}
+
+func NewMemberContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *MemberContext {
+ var p = new(MemberContext)
+
+ antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState)
+
+ p.parser = parser
+ p.RuleIndex = CELParserRULE_member
+
+ return p
+}
+
+func (s *MemberContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *MemberContext) CopyAll(ctx *MemberContext) {
+ s.CopyFrom(&ctx.BaseParserRuleContext)
+}
+
+func (s *MemberContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *MemberContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+ return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+
+
+
+
+type MemberCallContext struct {
+ MemberContext
+ op antlr.Token
+ id antlr.Token
+ open antlr.Token
+ args IExprListContext
+}
+
+func NewMemberCallContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *MemberCallContext {
+ var p = new(MemberCallContext)
+
+ InitEmptyMemberContext(&p.MemberContext)
+ p.parser = parser
+ p.CopyAll(ctx.(*MemberContext))
+
+ return p
+}
+
+
+func (s *MemberCallContext) GetOp() antlr.Token { return s.op }
+
+func (s *MemberCallContext) GetId() antlr.Token { return s.id }
+
+func (s *MemberCallContext) GetOpen() antlr.Token { return s.open }
+
+
+func (s *MemberCallContext) SetOp(v antlr.Token) { s.op = v }
+
+func (s *MemberCallContext) SetId(v antlr.Token) { s.id = v }
+
+func (s *MemberCallContext) SetOpen(v antlr.Token) { s.open = v }
+
+
+func (s *MemberCallContext) GetArgs() IExprListContext { return s.args }
+
+
+func (s *MemberCallContext) SetArgs(v IExprListContext) { s.args = v }
+
+func (s *MemberCallContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *MemberCallContext) Member() IMemberContext {
+ var t antlr.RuleContext;
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IMemberContext); ok {
+ t = ctx.(antlr.RuleContext);
+ break
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IMemberContext)
+}
+
+func (s *MemberCallContext) RPAREN() antlr.TerminalNode {
+ return s.GetToken(CELParserRPAREN, 0)
+}
+
+func (s *MemberCallContext) DOT() antlr.TerminalNode {
+ return s.GetToken(CELParserDOT, 0)
+}
+
+func (s *MemberCallContext) IDENTIFIER() antlr.TerminalNode {
+ return s.GetToken(CELParserIDENTIFIER, 0)
+}
+
+func (s *MemberCallContext) LPAREN() antlr.TerminalNode {
+ return s.GetToken(CELParserLPAREN, 0)
+}
+
+func (s *MemberCallContext) ExprList() IExprListContext {
+ var t antlr.RuleContext;
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IExprListContext); ok {
+ t = ctx.(antlr.RuleContext);
+ break
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IExprListContext)
+}
+
+
+func (s *MemberCallContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterMemberCall(s)
+ }
+}
+
+func (s *MemberCallContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitMemberCall(s)
+ }
+}
+
+func (s *MemberCallContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitMemberCall(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+
+type SelectContext struct {
+ MemberContext
+ op antlr.Token
+ opt antlr.Token
+ id antlr.Token
+}
+
+func NewSelectContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *SelectContext {
+ var p = new(SelectContext)
+
+ InitEmptyMemberContext(&p.MemberContext)
+ p.parser = parser
+ p.CopyAll(ctx.(*MemberContext))
+
+ return p
+}
+
+
+func (s *SelectContext) GetOp() antlr.Token { return s.op }
+
+func (s *SelectContext) GetOpt() antlr.Token { return s.opt }
+
+func (s *SelectContext) GetId() antlr.Token { return s.id }
+
+
+func (s *SelectContext) SetOp(v antlr.Token) { s.op = v }
+
+func (s *SelectContext) SetOpt(v antlr.Token) { s.opt = v }
+
+func (s *SelectContext) SetId(v antlr.Token) { s.id = v }
+
+func (s *SelectContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *SelectContext) Member() IMemberContext {
+ var t antlr.RuleContext;
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IMemberContext); ok {
+ t = ctx.(antlr.RuleContext);
+ break
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IMemberContext)
+}
+
+func (s *SelectContext) DOT() antlr.TerminalNode {
+ return s.GetToken(CELParserDOT, 0)
+}
+
+func (s *SelectContext) IDENTIFIER() antlr.TerminalNode {
+ return s.GetToken(CELParserIDENTIFIER, 0)
+}
+
+func (s *SelectContext) QUESTIONMARK() antlr.TerminalNode {
+ return s.GetToken(CELParserQUESTIONMARK, 0)
+}
+
+
+func (s *SelectContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterSelect(s)
+ }
+}
+
+func (s *SelectContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitSelect(s)
+ }
+}
+
+func (s *SelectContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitSelect(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+
+type PrimaryExprContext struct {
+ MemberContext
+}
+
+func NewPrimaryExprContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *PrimaryExprContext {
+ var p = new(PrimaryExprContext)
+
+ InitEmptyMemberContext(&p.MemberContext)
+ p.parser = parser
+ p.CopyAll(ctx.(*MemberContext))
+
+ return p
+}
+
+func (s *PrimaryExprContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *PrimaryExprContext) Primary() IPrimaryContext {
+ var t antlr.RuleContext;
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IPrimaryContext); ok {
+ t = ctx.(antlr.RuleContext);
+ break
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IPrimaryContext)
+}
+
+
+func (s *PrimaryExprContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterPrimaryExpr(s)
+ }
+}
+
+func (s *PrimaryExprContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitPrimaryExpr(s)
+ }
+}
+
+func (s *PrimaryExprContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitPrimaryExpr(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+
+type IndexContext struct {
+ MemberContext
+ op antlr.Token
+ opt antlr.Token
+ index IExprContext
+}
+
+func NewIndexContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *IndexContext {
+ var p = new(IndexContext)
+
+ InitEmptyMemberContext(&p.MemberContext)
+ p.parser = parser
+ p.CopyAll(ctx.(*MemberContext))
+
+ return p
+}
+
+
+func (s *IndexContext) GetOp() antlr.Token { return s.op }
+
+func (s *IndexContext) GetOpt() antlr.Token { return s.opt }
+
+
+func (s *IndexContext) SetOp(v antlr.Token) { s.op = v }
+
+func (s *IndexContext) SetOpt(v antlr.Token) { s.opt = v }
+
+
+func (s *IndexContext) GetIndex() IExprContext { return s.index }
+
+
+func (s *IndexContext) SetIndex(v IExprContext) { s.index = v }
+
+func (s *IndexContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *IndexContext) Member() IMemberContext {
+ var t antlr.RuleContext;
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IMemberContext); ok {
+ t = ctx.(antlr.RuleContext);
+ break
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IMemberContext)
+}
+
+func (s *IndexContext) RPRACKET() antlr.TerminalNode {
+ return s.GetToken(CELParserRPRACKET, 0)
+}
+
+func (s *IndexContext) LBRACKET() antlr.TerminalNode {
+ return s.GetToken(CELParserLBRACKET, 0)
+}
+
+func (s *IndexContext) Expr() IExprContext {
+ var t antlr.RuleContext;
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IExprContext); ok {
+ t = ctx.(antlr.RuleContext);
+ break
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IExprContext)
+}
+
+func (s *IndexContext) QUESTIONMARK() antlr.TerminalNode {
+ return s.GetToken(CELParserQUESTIONMARK, 0)
+}
+
+
+func (s *IndexContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterIndex(s)
+ }
+}
+
+func (s *IndexContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitIndex(s)
+ }
+}
+
+func (s *IndexContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitIndex(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+
+
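+// member is the left-recursive postfix rule. After an initial primary it
+// keeps extending the previous context with one of three suffixes:
+// Select ('.' '?'? IDENTIFIER, e.g. a.?b), MemberCall
+// ('.' IDENTIFIER '(' exprList? ')', e.g. a.b(c)) or Index
+// ('[' '?'? expr ']', e.g. a[?0]).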
+func (p *CELParser) Member() (localctx IMemberContext) {
+ return p.member(0)
+}
+
+func (p *CELParser) member(_p int) (localctx IMemberContext) {
+ var _parentctx antlr.ParserRuleContext = p.GetParserRuleContext()
+
+ _parentState := p.GetState()
+ localctx = NewMemberContext(p, p.GetParserRuleContext(), _parentState)
+ var _prevctx IMemberContext = localctx
+ var _ antlr.ParserRuleContext = _prevctx // TODO: To prevent unused variable warning.
+ _startState := 14
+ p.EnterRecursionRule(localctx, 14, CELParserRULE_member, _p)
+ var _la int
+
+ var _alt int
+
+ p.EnterOuterAlt(localctx, 1)
+ localctx = NewPrimaryExprContext(p, localctx)
+ p.SetParserRuleContext(localctx)
+ _prevctx = localctx
+
+ {
+ p.SetState(100)
+ p.Primary()
+ }
+
+ p.GetParserRuleContext().SetStop(p.GetTokenStream().LT(-1))
+ p.SetState(126)
+ p.GetErrorHandler().Sync(p)
+ if p.HasError() {
+ goto errorExit
+ }
+ _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 13, p.GetParserRuleContext())
+ if p.HasError() {
+ goto errorExit
+ }
+ for _alt != 2 && _alt != antlr.ATNInvalidAltNumber {
+ if _alt == 1 {
+ if p.GetParseListeners() != nil {
+ p.TriggerExitRuleEvent()
+ }
+ _prevctx = localctx
+ p.SetState(124)
+ p.GetErrorHandler().Sync(p)
+ if p.HasError() {
+ goto errorExit
+ }
+
+ switch p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 12, p.GetParserRuleContext()) {
+ case 1:
+ localctx = NewSelectContext(p, NewMemberContext(p, _parentctx, _parentState))
+ p.PushNewRecursionContext(localctx, _startState, CELParserRULE_member)
+ p.SetState(102)
+
+ if !(p.Precpred(p.GetParserRuleContext(), 3)) {
+ p.SetError(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 3)", ""))
+ goto errorExit
+ }
+ {
+ p.SetState(103)
+
+ var _m = p.Match(CELParserDOT)
+
+ localctx.(*SelectContext).op = _m
+ if p.HasError() {
+ // Recognition error - abort rule
+ goto errorExit
+ }
+ }
+ p.SetState(105)
+ p.GetErrorHandler().Sync(p)
+ if p.HasError() {
+ goto errorExit
+ }
+ _la = p.GetTokenStream().LA(1)
+
+
+ if _la == CELParserQUESTIONMARK {
+ {
+ p.SetState(104)
+
+ var _m = p.Match(CELParserQUESTIONMARK)
+
+ localctx.(*SelectContext).opt = _m
+ if p.HasError() {
+ // Recognition error - abort rule
+ goto errorExit
+ }
+ }
+
+ }
+ {
+ p.SetState(107)
+
+ var _m = p.Match(CELParserIDENTIFIER)
+
+ localctx.(*SelectContext).id = _m
+ if p.HasError() {
+ // Recognition error - abort rule
+ goto errorExit
+ }
+ }
+
+
+ case 2:
+ localctx = NewMemberCallContext(p, NewMemberContext(p, _parentctx, _parentState))
+ p.PushNewRecursionContext(localctx, _startState, CELParserRULE_member)
+ p.SetState(108)
+
+ if !(p.Precpred(p.GetParserRuleContext(), 2)) {
+ p.SetError(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 2)", ""))
+ goto errorExit
+ }
+ {
+ p.SetState(109)
+
+ var _m = p.Match(CELParserDOT)
+
+ localctx.(*MemberCallContext).op = _m
+ if p.HasError() {
+ // Recognition error - abort rule
+ goto errorExit
+ }
+ }
+ {
+ p.SetState(110)
+
+ var _m = p.Match(CELParserIDENTIFIER)
+
+ localctx.(*MemberCallContext).id = _m
+ if p.HasError() {
+ // Recognition error - abort rule
+ goto errorExit
+ }
+ }
+ {
+ p.SetState(111)
+
+ var _m = p.Match(CELParserLPAREN)
+
+ localctx.(*MemberCallContext).open = _m
+ if p.HasError() {
+ // Recognition error - abort rule
+ goto errorExit
+ }
+ }
+ p.SetState(113)
+ p.GetErrorHandler().Sync(p)
+ if p.HasError() {
+ goto errorExit
+ }
+ _la = p.GetTokenStream().LA(1)
+
+
+ if ((int64(_la) & ^0x3f) == 0 && ((int64(1) << _la) & 135762105344) != 0) {
+ {
+ p.SetState(112)
+
+ var _x = p.ExprList()
+
+
+ localctx.(*MemberCallContext).args = _x
+ }
+
+ }
+ {
+ p.SetState(115)
+ p.Match(CELParserRPAREN)
+ if p.HasError() {
+ // Recognition error - abort rule
+ goto errorExit
+ }
+ }
+
+
+ case 3:
+ localctx = NewIndexContext(p, NewMemberContext(p, _parentctx, _parentState))
+ p.PushNewRecursionContext(localctx, _startState, CELParserRULE_member)
+ p.SetState(116)
+
+ if !(p.Precpred(p.GetParserRuleContext(), 1)) {
+ p.SetError(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 1)", ""))
+ goto errorExit
+ }
+ {
+ p.SetState(117)
+
+ var _m = p.Match(CELParserLBRACKET)
+
+ localctx.(*IndexContext).op = _m
+ if p.HasError() {
+ // Recognition error - abort rule
+ goto errorExit
+ }
+ }
+ p.SetState(119)
+ p.GetErrorHandler().Sync(p)
+ if p.HasError() {
+ goto errorExit
+ }
+ _la = p.GetTokenStream().LA(1)
+
+
+ if _la == CELParserQUESTIONMARK {
+ {
+ p.SetState(118)
+
+ var _m = p.Match(CELParserQUESTIONMARK)
+
+ localctx.(*IndexContext).opt = _m
+ if p.HasError() {
+ // Recognition error - abort rule
+ goto errorExit
+ }
+ }
+
+ }
+ {
+ p.SetState(121)
+
+ var _x = p.Expr()
+
+
+ localctx.(*IndexContext).index = _x
+ }
+ {
+ p.SetState(122)
+ p.Match(CELParserRPRACKET)
+ if p.HasError() {
+ // Recognition error - abort rule
+ goto errorExit
+ }
+ }
+
+ case antlr.ATNInvalidAltNumber:
+ goto errorExit
+ }
+
+ }
+ p.SetState(128)
+ p.GetErrorHandler().Sync(p)
+ if p.HasError() {
+ goto errorExit
+ }
+ _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 13, p.GetParserRuleContext())
+ if p.HasError() {
+ goto errorExit
+ }
+ }
+
+
+
+ errorExit:
+ if p.HasError() {
+ v := p.GetError()
+ localctx.SetException(v)
+ p.GetErrorHandler().ReportError(p, v)
+ p.GetErrorHandler().Recover(p, v)
+ p.SetError(nil)
+ }
+ p.UnrollRecursionContexts(_parentctx)
+ return localctx
+ goto errorExit // Trick to prevent compiler error if the label is not used
+}
+
+
+// IPrimaryContext is an interface to support dynamic dispatch.
+type IPrimaryContext interface {
+ antlr.ParserRuleContext
+
+ // GetParser returns the parser.
+ GetParser() antlr.Parser
+ // IsPrimaryContext differentiates from other interfaces.
+ IsPrimaryContext()
+}
+
+type PrimaryContext struct {
+ antlr.BaseParserRuleContext
+ parser antlr.Parser
+}
+
+func NewEmptyPrimaryContext() *PrimaryContext {
+ var p = new(PrimaryContext)
+ antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
+ p.RuleIndex = CELParserRULE_primary
+ return p
+}
+
+func InitEmptyPrimaryContext(p *PrimaryContext) {
+ antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
+ p.RuleIndex = CELParserRULE_primary
+}
+
+func (*PrimaryContext) IsPrimaryContext() {}
+
+func NewPrimaryContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *PrimaryContext {
+ var p = new(PrimaryContext)
+
+ antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState)
+
+ p.parser = parser
+ p.RuleIndex = CELParserRULE_primary
+
+ return p
+}
+
+func (s *PrimaryContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *PrimaryContext) CopyAll(ctx *PrimaryContext) {
+ s.CopyFrom(&ctx.BaseParserRuleContext)
+}
+
+func (s *PrimaryContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *PrimaryContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+ return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+
+
+
+type CreateListContext struct {
+ PrimaryContext
+ op antlr.Token
+ elems IListInitContext
+}
+
+func NewCreateListContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *CreateListContext {
+ var p = new(CreateListContext)
+
+ InitEmptyPrimaryContext(&p.PrimaryContext)
+ p.parser = parser
+ p.CopyAll(ctx.(*PrimaryContext))
+
+ return p
+}
+
+
+func (s *CreateListContext) GetOp() antlr.Token { return s.op }
+
+
+func (s *CreateListContext) SetOp(v antlr.Token) { s.op = v }
+
+
+func (s *CreateListContext) GetElems() IListInitContext { return s.elems }
+
+
+func (s *CreateListContext) SetElems(v IListInitContext) { s.elems = v }
+
+func (s *CreateListContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *CreateListContext) RPRACKET() antlr.TerminalNode {
+ return s.GetToken(CELParserRPRACKET, 0)
+}
+
+func (s *CreateListContext) LBRACKET() antlr.TerminalNode {
+ return s.GetToken(CELParserLBRACKET, 0)
+}
+
+func (s *CreateListContext) COMMA() antlr.TerminalNode {
+ return s.GetToken(CELParserCOMMA, 0)
+}
+
+func (s *CreateListContext) ListInit() IListInitContext {
+ var t antlr.RuleContext;
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IListInitContext); ok {
+ t = ctx.(antlr.RuleContext);
+ break
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IListInitContext)
+}
+
+
+func (s *CreateListContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterCreateList(s)
+ }
+}
+
+func (s *CreateListContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitCreateList(s)
+ }
+}
+
+func (s *CreateListContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitCreateList(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+
+type CreateStructContext struct {
+ PrimaryContext
+ op antlr.Token
+ entries IMapInitializerListContext
+}
+
+func NewCreateStructContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *CreateStructContext {
+ var p = new(CreateStructContext)
+
+ InitEmptyPrimaryContext(&p.PrimaryContext)
+ p.parser = parser
+ p.CopyAll(ctx.(*PrimaryContext))
+
+ return p
+}
+
+
+func (s *CreateStructContext) GetOp() antlr.Token { return s.op }
+
+
+func (s *CreateStructContext) SetOp(v antlr.Token) { s.op = v }
+
+
+func (s *CreateStructContext) GetEntries() IMapInitializerListContext { return s.entries }
+
+
+func (s *CreateStructContext) SetEntries(v IMapInitializerListContext) { s.entries = v }
+
+func (s *CreateStructContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *CreateStructContext) RBRACE() antlr.TerminalNode {
+ return s.GetToken(CELParserRBRACE, 0)
+}
+
+func (s *CreateStructContext) LBRACE() antlr.TerminalNode {
+ return s.GetToken(CELParserLBRACE, 0)
+}
+
+func (s *CreateStructContext) COMMA() antlr.TerminalNode {
+ return s.GetToken(CELParserCOMMA, 0)
+}
+
+func (s *CreateStructContext) MapInitializerList() IMapInitializerListContext {
+ var t antlr.RuleContext;
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IMapInitializerListContext); ok {
+ t = ctx.(antlr.RuleContext);
+ break
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IMapInitializerListContext)
+}
+
+
+func (s *CreateStructContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterCreateStruct(s)
+ }
+}
+
+func (s *CreateStructContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitCreateStruct(s)
+ }
+}
+
+func (s *CreateStructContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitCreateStruct(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+
+type ConstantLiteralContext struct {
+ PrimaryContext
+}
+
+func NewConstantLiteralContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *ConstantLiteralContext {
+ var p = new(ConstantLiteralContext)
+
+ InitEmptyPrimaryContext(&p.PrimaryContext)
+ p.parser = parser
+ p.CopyAll(ctx.(*PrimaryContext))
+
+ return p
+}
+
+func (s *ConstantLiteralContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *ConstantLiteralContext) Literal() ILiteralContext {
+ var t antlr.RuleContext;
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(ILiteralContext); ok {
+ t = ctx.(antlr.RuleContext);
+ break
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(ILiteralContext)
+}
+
+
+func (s *ConstantLiteralContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterConstantLiteral(s)
+ }
+}
+
+func (s *ConstantLiteralContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitConstantLiteral(s)
+ }
+}
+
+func (s *ConstantLiteralContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitConstantLiteral(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+
+type NestedContext struct {
+ PrimaryContext
+ e IExprContext
+}
+
+func NewNestedContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *NestedContext {
+ var p = new(NestedContext)
+
+ InitEmptyPrimaryContext(&p.PrimaryContext)
+ p.parser = parser
+ p.CopyAll(ctx.(*PrimaryContext))
+
+ return p
+}
+
+
+func (s *NestedContext) GetE() IExprContext { return s.e }
+
+
+func (s *NestedContext) SetE(v IExprContext) { s.e = v }
+
+func (s *NestedContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *NestedContext) LPAREN() antlr.TerminalNode {
+ return s.GetToken(CELParserLPAREN, 0)
+}
+
+func (s *NestedContext) RPAREN() antlr.TerminalNode {
+ return s.GetToken(CELParserRPAREN, 0)
+}
+
+func (s *NestedContext) Expr() IExprContext {
+ var t antlr.RuleContext;
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IExprContext); ok {
+ t = ctx.(antlr.RuleContext);
+ break
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IExprContext)
+}
+
+
+func (s *NestedContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterNested(s)
+ }
+}
+
+func (s *NestedContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitNested(s)
+ }
+}
+
+func (s *NestedContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitNested(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+
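+// CreateMessageContext captures a message construction such as
+// .pkg.Type{field: value}: ids collects each IDENTIFIER of the dotted type
+// name, ops the '.' separators, leadingDot an optional leading '.', and
+// entries the field initializer list between the braces.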
+type CreateMessageContext struct {
+ PrimaryContext
+ leadingDot antlr.Token
+ _IDENTIFIER antlr.Token
+ ids []antlr.Token
+ s16 antlr.Token
+ ops []antlr.Token
+ op antlr.Token
+ entries IFieldInitializerListContext
+}
+
+func NewCreateMessageContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *CreateMessageContext {
+ var p = new(CreateMessageContext)
+
+ InitEmptyPrimaryContext(&p.PrimaryContext)
+ p.parser = parser
+ p.CopyAll(ctx.(*PrimaryContext))
+
+ return p
+}
+
+
+func (s *CreateMessageContext) GetLeadingDot() antlr.Token { return s.leadingDot }
+
+func (s *CreateMessageContext) Get_IDENTIFIER() antlr.Token { return s._IDENTIFIER }
+
+func (s *CreateMessageContext) GetS16() antlr.Token { return s.s16 }
+
+func (s *CreateMessageContext) GetOp() antlr.Token { return s.op }
+
+
+func (s *CreateMessageContext) SetLeadingDot(v antlr.Token) { s.leadingDot = v }
+
+func (s *CreateMessageContext) Set_IDENTIFIER(v antlr.Token) { s._IDENTIFIER = v }
+
+func (s *CreateMessageContext) SetS16(v antlr.Token) { s.s16 = v }
+
+func (s *CreateMessageContext) SetOp(v antlr.Token) { s.op = v }
+
+
+func (s *CreateMessageContext) GetIds() []antlr.Token { return s.ids }
+
+func (s *CreateMessageContext) GetOps() []antlr.Token { return s.ops }
+
+
+func (s *CreateMessageContext) SetIds(v []antlr.Token) { s.ids = v }
+
+func (s *CreateMessageContext) SetOps(v []antlr.Token) { s.ops = v }
+
+
+func (s *CreateMessageContext) GetEntries() IFieldInitializerListContext { return s.entries }
+
+
+func (s *CreateMessageContext) SetEntries(v IFieldInitializerListContext) { s.entries = v }
+
+func (s *CreateMessageContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *CreateMessageContext) RBRACE() antlr.TerminalNode {
+ return s.GetToken(CELParserRBRACE, 0)
+}
+
+func (s *CreateMessageContext) AllIDENTIFIER() []antlr.TerminalNode {
+ return s.GetTokens(CELParserIDENTIFIER)
+}
+
+func (s *CreateMessageContext) IDENTIFIER(i int) antlr.TerminalNode {
+ return s.GetToken(CELParserIDENTIFIER, i)
+}
+
+func (s *CreateMessageContext) LBRACE() antlr.TerminalNode {
+ return s.GetToken(CELParserLBRACE, 0)
+}
+
+func (s *CreateMessageContext) COMMA() antlr.TerminalNode {
+ return s.GetToken(CELParserCOMMA, 0)
+}
+
+func (s *CreateMessageContext) AllDOT() []antlr.TerminalNode {
+ return s.GetTokens(CELParserDOT)
+}
+
+func (s *CreateMessageContext) DOT(i int) antlr.TerminalNode {
+ return s.GetToken(CELParserDOT, i)
+}
+
+func (s *CreateMessageContext) FieldInitializerList() IFieldInitializerListContext {
+ var t antlr.RuleContext;
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IFieldInitializerListContext); ok {
+ t = ctx.(antlr.RuleContext);
+ break
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IFieldInitializerListContext)
+}
+
+
+func (s *CreateMessageContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterCreateMessage(s)
+ }
+}
+
+func (s *CreateMessageContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitCreateMessage(s)
+ }
+}
+
+func (s *CreateMessageContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitCreateMessage(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+
+type IdentOrGlobalCallContext struct {
+ PrimaryContext
+ leadingDot antlr.Token
+ id antlr.Token
+ op antlr.Token
+ args IExprListContext
+}
+
+func NewIdentOrGlobalCallContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *IdentOrGlobalCallContext {
+ var p = new(IdentOrGlobalCallContext)
+
+ InitEmptyPrimaryContext(&p.PrimaryContext)
+ p.parser = parser
+ p.CopyAll(ctx.(*PrimaryContext))
+
+ return p
+}
+
+
+func (s *IdentOrGlobalCallContext) GetLeadingDot() antlr.Token { return s.leadingDot }
+
+func (s *IdentOrGlobalCallContext) GetId() antlr.Token { return s.id }
+
+func (s *IdentOrGlobalCallContext) GetOp() antlr.Token { return s.op }
+
+
+func (s *IdentOrGlobalCallContext) SetLeadingDot(v antlr.Token) { s.leadingDot = v }
+
+func (s *IdentOrGlobalCallContext) SetId(v antlr.Token) { s.id = v }
+
+func (s *IdentOrGlobalCallContext) SetOp(v antlr.Token) { s.op = v }
+
+
+func (s *IdentOrGlobalCallContext) GetArgs() IExprListContext { return s.args }
+
+
+func (s *IdentOrGlobalCallContext) SetArgs(v IExprListContext) { s.args = v }
+
+func (s *IdentOrGlobalCallContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *IdentOrGlobalCallContext) IDENTIFIER() antlr.TerminalNode {
+ return s.GetToken(CELParserIDENTIFIER, 0)
+}
+
+func (s *IdentOrGlobalCallContext) RPAREN() antlr.TerminalNode {
+ return s.GetToken(CELParserRPAREN, 0)
+}
+
+func (s *IdentOrGlobalCallContext) DOT() antlr.TerminalNode {
+ return s.GetToken(CELParserDOT, 0)
+}
+
+func (s *IdentOrGlobalCallContext) LPAREN() antlr.TerminalNode {
+ return s.GetToken(CELParserLPAREN, 0)
+}
+
+func (s *IdentOrGlobalCallContext) ExprList() IExprListContext {
+ var t antlr.RuleContext;
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IExprListContext); ok {
+ t = ctx.(antlr.RuleContext);
+ break
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IExprListContext)
+}
+
+
+func (s *IdentOrGlobalCallContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterIdentOrGlobalCall(s)
+ }
+}
+
+func (s *IdentOrGlobalCallContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitIdentOrGlobalCall(s)
+ }
+}
+
+func (s *IdentOrGlobalCallContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitIdentOrGlobalCall(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+
+
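+// Primary selects among the six labeled primary alternatives:
+// IdentOrGlobalCall (e.g. .name or f(x)), Nested ('(' expr ')'),
+// CreateList ('[' ... ']', trailing comma allowed), CreateStruct
+// ('{' ... '}'), CreateMessage (dotted type name plus '{...}') and
+// ConstantLiteral.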
+func (p *CELParser) Primary() (localctx IPrimaryContext) {
+ localctx = NewPrimaryContext(p, p.GetParserRuleContext(), p.GetState())
+ p.EnterRule(localctx, 16, CELParserRULE_primary)
+ var _la int
+
+ p.SetState(180)
+ p.GetErrorHandler().Sync(p)
+ if p.HasError() {
+ goto errorExit
+ }
+
+ switch p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 25, p.GetParserRuleContext()) {
+ case 1:
+ localctx = NewIdentOrGlobalCallContext(p, localctx)
+ p.EnterOuterAlt(localctx, 1)
+ p.SetState(130)
+ p.GetErrorHandler().Sync(p)
+ if p.HasError() {
+ goto errorExit
+ }
+ _la = p.GetTokenStream().LA(1)
+
+
+ if _la == CELParserDOT {
+ {
+ p.SetState(129)
+
+ var _m = p.Match(CELParserDOT)
+
+ localctx.(*IdentOrGlobalCallContext).leadingDot = _m
+ if p.HasError() {
+ // Recognition error - abort rule
+ goto errorExit
+ }
+ }
+
+ }
+ {
+ p.SetState(132)
+
+ var _m = p.Match(CELParserIDENTIFIER)
+
+ localctx.(*IdentOrGlobalCallContext).id = _m
+ if p.HasError() {
+ // Recognition error - abort rule
+ goto errorExit
+ }
+ }
+ p.SetState(138)
+ p.GetErrorHandler().Sync(p)
+
+
+ if p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 16, p.GetParserRuleContext()) == 1 {
+ {
+ p.SetState(133)
+
+ var _m = p.Match(CELParserLPAREN)
+
+ localctx.(*IdentOrGlobalCallContext).op = _m
+ if p.HasError() {
+ // Recognition error - abort rule
+ goto errorExit
+ }
+ }
+ p.SetState(135)
+ p.GetErrorHandler().Sync(p)
+ if p.HasError() {
+ goto errorExit
+ }
+ _la = p.GetTokenStream().LA(1)
+
+
+ if ((int64(_la) & ^0x3f) == 0 && ((int64(1) << _la) & 135762105344) != 0) {
+ {
+ p.SetState(134)
+
+ var _x = p.ExprList()
+
+
+ localctx.(*IdentOrGlobalCallContext).args = _x
+ }
+
+ }
+ {
+ p.SetState(137)
+ p.Match(CELParserRPAREN)
+ if p.HasError() {
+ // Recognition error - abort rule
+ goto errorExit
+ }
+ }
+
+ } else if p.HasError() { // JIM
+ goto errorExit
+ }
+
+
+ case 2:
+ localctx = NewNestedContext(p, localctx)
+ p.EnterOuterAlt(localctx, 2)
+ {
+ p.SetState(140)
+ p.Match(CELParserLPAREN)
+ if p.HasError() {
+ // Recognition error - abort rule
+ goto errorExit
+ }
+ }
+ {
+ p.SetState(141)
+
+ var _x = p.Expr()
+
+
+ localctx.(*NestedContext).e = _x
+ }
+ {
+ p.SetState(142)
+ p.Match(CELParserRPAREN)
+ if p.HasError() {
+ // Recognition error - abort rule
+ goto errorExit
+ }
+ }
+
+
+ case 3:
+ localctx = NewCreateListContext(p, localctx)
+ p.EnterOuterAlt(localctx, 3)
+ {
+ p.SetState(144)
+
+ var _m = p.Match(CELParserLBRACKET)
+
+ localctx.(*CreateListContext).op = _m
+ if p.HasError() {
+ // Recognition error - abort rule
+ goto errorExit
+ }
+ }
+ p.SetState(146)
+ p.GetErrorHandler().Sync(p)
+ if p.HasError() {
+ goto errorExit
+ }
+ _la = p.GetTokenStream().LA(1)
+
+
+ if ((int64(_la) & ^0x3f) == 0 && ((int64(1) << _la) & 135763153920) != 0) {
+ {
+ p.SetState(145)
+
+ var _x = p.ListInit()
+
+
+ localctx.(*CreateListContext).elems = _x
+ }
+
+ }
+ p.SetState(149)
+ p.GetErrorHandler().Sync(p)
+ if p.HasError() {
+ goto errorExit
+ }
+ _la = p.GetTokenStream().LA(1)
+
+
+ if _la == CELParserCOMMA {
+ {
+ p.SetState(148)
+ p.Match(CELParserCOMMA)
+ if p.HasError() {
+ // Recognition error - abort rule
+ goto errorExit
+ }
+ }
+
+ }
+ {
+ p.SetState(151)
+ p.Match(CELParserRPRACKET)
+ if p.HasError() {
+ // Recognition error - abort rule
+ goto errorExit
+ }
+ }
+
+
+ case 4:
+ localctx = NewCreateStructContext(p, localctx)
+ p.EnterOuterAlt(localctx, 4)
+ {
+ p.SetState(152)
+
+ var _m = p.Match(CELParserLBRACE)
+
+ localctx.(*CreateStructContext).op = _m
+ if p.HasError() {
+ // Recognition error - abort rule
+ goto errorExit
+ }
+ }
+ p.SetState(154)
+ p.GetErrorHandler().Sync(p)
+ if p.HasError() {
+ goto errorExit
+ }
+ _la = p.GetTokenStream().LA(1)
+
+
+ if ((int64(_la) & ^0x3f) == 0 && ((int64(1) << _la) & 135763153920) != 0) {
+ {
+ p.SetState(153)
+
+ var _x = p.MapInitializerList()
+
+
+ localctx.(*CreateStructContext).entries = _x
+ }
+
+ }
+ p.SetState(157)
+ p.GetErrorHandler().Sync(p)
+ if p.HasError() {
+ goto errorExit
+ }
+ _la = p.GetTokenStream().LA(1)
+
+
+ if _la == CELParserCOMMA {
+ {
+ p.SetState(156)
+ p.Match(CELParserCOMMA)
+ if p.HasError() {
+ // Recognition error - abort rule
+ goto errorExit
+ }
+ }
+
+ }
+ {
+ p.SetState(159)
+ p.Match(CELParserRBRACE)
+ if p.HasError() {
+ // Recognition error - abort rule
+ goto errorExit
+ }
+ }
+
+
+ case 5:
+ localctx = NewCreateMessageContext(p, localctx)
+ p.EnterOuterAlt(localctx, 5)
+ p.SetState(161)
+ p.GetErrorHandler().Sync(p)
+ if p.HasError() {
+ goto errorExit
+ }
+ _la = p.GetTokenStream().LA(1)
+
+
+ if _la == CELParserDOT {
+ {
+ p.SetState(160)
+
+ var _m = p.Match(CELParserDOT)
+
+ localctx.(*CreateMessageContext).leadingDot = _m
+ if p.HasError() {
+ // Recognition error - abort rule
+ goto errorExit
+ }
+ }
+
+ }
+ {
+ p.SetState(163)
+
+ var _m = p.Match(CELParserIDENTIFIER)
+
+ localctx.(*CreateMessageContext)._IDENTIFIER = _m
+ if p.HasError() {
+ // Recognition error - abort rule
+ goto errorExit
+ }
+ }
+ localctx.(*CreateMessageContext).ids = append(localctx.(*CreateMessageContext).ids, localctx.(*CreateMessageContext)._IDENTIFIER)
+ p.SetState(168)
+ p.GetErrorHandler().Sync(p)
+ if p.HasError() {
+ goto errorExit
+ }
+ _la = p.GetTokenStream().LA(1)
+
+
+ for _la == CELParserDOT {
+ {
+ p.SetState(164)
+
+ var _m = p.Match(CELParserDOT)
+
+ localctx.(*CreateMessageContext).s16 = _m
+ if p.HasError() {
+ // Recognition error - abort rule
+ goto errorExit
+ }
+ }
+ localctx.(*CreateMessageContext).ops = append(localctx.(*CreateMessageContext).ops, localctx.(*CreateMessageContext).s16)
+ {
+ p.SetState(165)
+
+ var _m = p.Match(CELParserIDENTIFIER)
+
+ localctx.(*CreateMessageContext)._IDENTIFIER = _m
+ if p.HasError() {
+ // Recognition error - abort rule
+ goto errorExit
+ }
+ }
+ localctx.(*CreateMessageContext).ids = append(localctx.(*CreateMessageContext).ids, localctx.(*CreateMessageContext)._IDENTIFIER)
+
+
+ p.SetState(170)
+ p.GetErrorHandler().Sync(p)
+ if p.HasError() {
+ goto errorExit
+ }
+ _la = p.GetTokenStream().LA(1)
+ }
+ {
+ p.SetState(171)
+
+ var _m = p.Match(CELParserLBRACE)
+
+ localctx.(*CreateMessageContext).op = _m
+ if p.HasError() {
+ // Recognition error - abort rule
+ goto errorExit
+ }
+ }
+ p.SetState(173)
+ p.GetErrorHandler().Sync(p)
+ if p.HasError() {
+ goto errorExit
+ }
+ _la = p.GetTokenStream().LA(1)
+
+
+ if _la == CELParserQUESTIONMARK || _la == CELParserIDENTIFIER {
+ {
+ p.SetState(172)
+
+ var _x = p.FieldInitializerList()
+
+
+ localctx.(*CreateMessageContext).entries = _x
+ }
+
+ }
+ p.SetState(176)
+ p.GetErrorHandler().Sync(p)
+ if p.HasError() {
+ goto errorExit
+ }
+ _la = p.GetTokenStream().LA(1)
+
+
+ if _la == CELParserCOMMA {
+ {
+ p.SetState(175)
+ p.Match(CELParserCOMMA)
+ if p.HasError() {
+ // Recognition error - abort rule
+ goto errorExit
+ }
+ }
+
+ }
+ {
+ p.SetState(178)
+ p.Match(CELParserRBRACE)
+ if p.HasError() {
+ // Recognition error - abort rule
+ goto errorExit
+ }
+ }
+
+
+ case 6:
+ localctx = NewConstantLiteralContext(p, localctx)
+ p.EnterOuterAlt(localctx, 6)
+ {
+ p.SetState(179)
+ p.Literal()
+ }
+
+ case antlr.ATNInvalidAltNumber:
+ goto errorExit
+ }
+
+
+errorExit:
+ if p.HasError() {
+ v := p.GetError()
+ localctx.SetException(v)
+ p.GetErrorHandler().ReportError(p, v)
+ p.GetErrorHandler().Recover(p, v)
+ p.SetError(nil)
+ }
+ p.ExitRule()
+ return localctx
+ goto errorExit // Trick to prevent compiler error if the label is not used
+}
+
+
+// IExprListContext is an interface to support dynamic dispatch.
+type IExprListContext interface {
+ antlr.ParserRuleContext
+
+ // GetParser returns the parser.
+ GetParser() antlr.Parser
+
+ // Get_expr returns the _expr rule contexts.
+ Get_expr() IExprContext
+
+
+ // Set_expr sets the _expr rule contexts.
+ Set_expr(IExprContext)
+
+
+ // GetE returns the e rule context list.
+ GetE() []IExprContext
+
+
+ // SetE sets the e rule context list.
+ SetE([]IExprContext)
+
+
+ // Getter signatures
+ AllExpr() []IExprContext
+ Expr(i int) IExprContext
+ AllCOMMA() []antlr.TerminalNode
+ COMMA(i int) antlr.TerminalNode
+
+ // IsExprListContext differentiates from other interfaces.
+ IsExprListContext()
+}
+
+type ExprListContext struct {
+ antlr.BaseParserRuleContext
+ parser antlr.Parser
+ _expr IExprContext
+ e []IExprContext
+}
+
+func NewEmptyExprListContext() *ExprListContext {
+ var p = new(ExprListContext)
+ antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
+ p.RuleIndex = CELParserRULE_exprList
+ return p
+}
+
+func InitEmptyExprListContext(p *ExprListContext) {
+ antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
+ p.RuleIndex = CELParserRULE_exprList
+}
+
+func (*ExprListContext) IsExprListContext() {}
+
+func NewExprListContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *ExprListContext {
+ var p = new(ExprListContext)
+
+ antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState)
+
+ p.parser = parser
+ p.RuleIndex = CELParserRULE_exprList
+
+ return p
+}
+
+func (s *ExprListContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *ExprListContext) Get_expr() IExprContext { return s._expr }
+
+
+func (s *ExprListContext) Set_expr(v IExprContext) { s._expr = v }
+
+
+func (s *ExprListContext) GetE() []IExprContext { return s.e }
+
+
+func (s *ExprListContext) SetE(v []IExprContext) { s.e = v }
+
+
+func (s *ExprListContext) AllExpr() []IExprContext {
+ children := s.GetChildren()
+ len := 0
+ for _, ctx := range children {
+ if _, ok := ctx.(IExprContext); ok {
+ len++
+ }
+ }
+
+ tst := make([]IExprContext, len)
+ i := 0
+ for _, ctx := range children {
+ if t, ok := ctx.(IExprContext); ok {
+ tst[i] = t.(IExprContext)
+ i++
+ }
+ }
+
+ return tst
+}
+
+func (s *ExprListContext) Expr(i int) IExprContext {
+	var t antlr.RuleContext
+ j := 0
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IExprContext); ok {
+ if j == i {
+				t = ctx.(antlr.RuleContext)
+ break
+ }
+ j++
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IExprContext)
+}
+
+func (s *ExprListContext) AllCOMMA() []antlr.TerminalNode {
+ return s.GetTokens(CELParserCOMMA)
+}
+
+func (s *ExprListContext) COMMA(i int) antlr.TerminalNode {
+ return s.GetToken(CELParserCOMMA, i)
+}
+
+func (s *ExprListContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *ExprListContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+ return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+
+func (s *ExprListContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterExprList(s)
+ }
+}
+
+func (s *ExprListContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitExprList(s)
+ }
+}
+
+func (s *ExprListContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitExprList(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+
+
+
+func (p *CELParser) ExprList() (localctx IExprListContext) {
+ localctx = NewExprListContext(p, p.GetParserRuleContext(), p.GetState())
+ p.EnterRule(localctx, 18, CELParserRULE_exprList)
+ var _la int
+
+ p.EnterOuterAlt(localctx, 1)
+ {
+ p.SetState(182)
+
+ var _x = p.Expr()
+
+
+ localctx.(*ExprListContext)._expr = _x
+ }
+ localctx.(*ExprListContext).e = append(localctx.(*ExprListContext).e, localctx.(*ExprListContext)._expr)
+ p.SetState(187)
+ p.GetErrorHandler().Sync(p)
+ if p.HasError() {
+ goto errorExit
+ }
+ _la = p.GetTokenStream().LA(1)
+
+
+ for _la == CELParserCOMMA {
+ {
+ p.SetState(183)
+ p.Match(CELParserCOMMA)
+ if p.HasError() {
+ // Recognition error - abort rule
+ goto errorExit
+ }
+ }
+ {
+ p.SetState(184)
+
+ var _x = p.Expr()
+
+
+ localctx.(*ExprListContext)._expr = _x
+ }
+ localctx.(*ExprListContext).e = append(localctx.(*ExprListContext).e, localctx.(*ExprListContext)._expr)
+
+
+ p.SetState(189)
+ p.GetErrorHandler().Sync(p)
+ if p.HasError() {
+ goto errorExit
+ }
+ _la = p.GetTokenStream().LA(1)
+ }
+
+
+
+errorExit:
+ if p.HasError() {
+ v := p.GetError()
+ localctx.SetException(v)
+ p.GetErrorHandler().ReportError(p, v)
+ p.GetErrorHandler().Recover(p, v)
+ p.SetError(nil)
+ }
+ p.ExitRule()
+ return localctx
+ goto errorExit // Trick to prevent compiler error if the label is not used
+}
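
For orientation, the `e` slice populated above mirrors the grammar label `e+=expr`: each loop iteration appends the freshly parsed expression context. A minimal sketch of how a consumer might read it back (hypothetical helper package and function name; not part of this diff):

```go
package exprutil // hypothetical helper package, for illustration only

import gen "github.com/google/cel-go/parser/gen"

// exprListArity reports how many expressions ExprList() collected.
// GetE() exposes the label-built slice; AllExpr() derives the same
// elements by filtering children, so the two agree on a clean parse.
func exprListArity(ctx gen.IExprListContext) int {
	return len(ctx.GetE())
}
```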
+
+
+// IListInitContext is an interface to support dynamic dispatch.
+type IListInitContext interface {
+ antlr.ParserRuleContext
+
+ // GetParser returns the parser.
+ GetParser() antlr.Parser
+
+ // Get_optExpr returns the _optExpr rule contexts.
+ Get_optExpr() IOptExprContext
+
+
+ // Set_optExpr sets the _optExpr rule contexts.
+ Set_optExpr(IOptExprContext)
+
+
+ // GetElems returns the elems rule context list.
+ GetElems() []IOptExprContext
+
+
+ // SetElems sets the elems rule context list.
+ SetElems([]IOptExprContext)
+
+
+ // Getter signatures
+ AllOptExpr() []IOptExprContext
+ OptExpr(i int) IOptExprContext
+ AllCOMMA() []antlr.TerminalNode
+ COMMA(i int) antlr.TerminalNode
+
+ // IsListInitContext differentiates from other interfaces.
+ IsListInitContext()
+}
+
+type ListInitContext struct {
+ antlr.BaseParserRuleContext
+ parser antlr.Parser
+ _optExpr IOptExprContext
+ elems []IOptExprContext
+}
+
+func NewEmptyListInitContext() *ListInitContext {
+ var p = new(ListInitContext)
+ antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
+ p.RuleIndex = CELParserRULE_listInit
+ return p
+}
+
+func InitEmptyListInitContext(p *ListInitContext) {
+ antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
+ p.RuleIndex = CELParserRULE_listInit
+}
+
+func (*ListInitContext) IsListInitContext() {}
+
+func NewListInitContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *ListInitContext {
+ var p = new(ListInitContext)
+
+ antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState)
+
+ p.parser = parser
+ p.RuleIndex = CELParserRULE_listInit
+
+ return p
+}
+
+func (s *ListInitContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *ListInitContext) Get_optExpr() IOptExprContext { return s._optExpr }
+
+
+func (s *ListInitContext) Set_optExpr(v IOptExprContext) { s._optExpr = v }
+
+
+func (s *ListInitContext) GetElems() []IOptExprContext { return s.elems }
+
+
+func (s *ListInitContext) SetElems(v []IOptExprContext) { s.elems = v }
+
+
+func (s *ListInitContext) AllOptExpr() []IOptExprContext {
+ children := s.GetChildren()
+ len := 0
+ for _, ctx := range children {
+ if _, ok := ctx.(IOptExprContext); ok {
+ len++
+ }
+ }
+
+ tst := make([]IOptExprContext, len)
+ i := 0
+ for _, ctx := range children {
+ if t, ok := ctx.(IOptExprContext); ok {
+ tst[i] = t.(IOptExprContext)
+ i++
+ }
+ }
+
+ return tst
+}
+
+func (s *ListInitContext) OptExpr(i int) IOptExprContext {
+	var t antlr.RuleContext
+ j := 0
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IOptExprContext); ok {
+ if j == i {
+				t = ctx.(antlr.RuleContext)
+ break
+ }
+ j++
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IOptExprContext)
+}
+
+func (s *ListInitContext) AllCOMMA() []antlr.TerminalNode {
+ return s.GetTokens(CELParserCOMMA)
+}
+
+func (s *ListInitContext) COMMA(i int) antlr.TerminalNode {
+ return s.GetToken(CELParserCOMMA, i)
+}
+
+func (s *ListInitContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *ListInitContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+ return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+
+func (s *ListInitContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterListInit(s)
+ }
+}
+
+func (s *ListInitContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitListInit(s)
+ }
+}
+
+func (s *ListInitContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitListInit(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+
+
+
+func (p *CELParser) ListInit() (localctx IListInitContext) {
+ localctx = NewListInitContext(p, p.GetParserRuleContext(), p.GetState())
+ p.EnterRule(localctx, 20, CELParserRULE_listInit)
+ var _alt int
+
+ p.EnterOuterAlt(localctx, 1)
+ {
+ p.SetState(190)
+
+ var _x = p.OptExpr()
+
+
+ localctx.(*ListInitContext)._optExpr = _x
+ }
+ localctx.(*ListInitContext).elems = append(localctx.(*ListInitContext).elems, localctx.(*ListInitContext)._optExpr)
+ p.SetState(195)
+ p.GetErrorHandler().Sync(p)
+ if p.HasError() {
+ goto errorExit
+ }
+ _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 27, p.GetParserRuleContext())
+ if p.HasError() {
+ goto errorExit
+ }
+ for _alt != 2 && _alt != antlr.ATNInvalidAltNumber {
+ if _alt == 1 {
+ {
+ p.SetState(191)
+ p.Match(CELParserCOMMA)
+ if p.HasError() {
+ // Recognition error - abort rule
+ goto errorExit
+ }
+ }
+ {
+ p.SetState(192)
+
+ var _x = p.OptExpr()
+
+
+ localctx.(*ListInitContext)._optExpr = _x
+ }
+ localctx.(*ListInitContext).elems = append(localctx.(*ListInitContext).elems, localctx.(*ListInitContext)._optExpr)
+
+
+ }
+ p.SetState(197)
+ p.GetErrorHandler().Sync(p)
+ if p.HasError() {
+ goto errorExit
+ }
+ _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 27, p.GetParserRuleContext())
+ if p.HasError() {
+ goto errorExit
+ }
+ }
+
+
+
+errorExit:
+ if p.HasError() {
+ v := p.GetError()
+ localctx.SetException(v)
+ p.GetErrorHandler().ReportError(p, v)
+ p.GetErrorHandler().Recover(p, v)
+ p.SetError(nil)
+ }
+ p.ExitRule()
+ return localctx
+ goto errorExit // Trick to prevent compiler error if the label is not used
+}
+
+
+// IFieldInitializerListContext is an interface to support dynamic dispatch.
+type IFieldInitializerListContext interface {
+ antlr.ParserRuleContext
+
+ // GetParser returns the parser.
+ GetParser() antlr.Parser
+
+ // GetS21 returns the s21 token.
+ GetS21() antlr.Token
+
+
+ // SetS21 sets the s21 token.
+ SetS21(antlr.Token)
+
+
+ // GetCols returns the cols token list.
+ GetCols() []antlr.Token
+
+
+ // SetCols sets the cols token list.
+ SetCols([]antlr.Token)
+
+
+ // Get_optField returns the _optField rule contexts.
+ Get_optField() IOptFieldContext
+
+ // Get_expr returns the _expr rule contexts.
+ Get_expr() IExprContext
+
+
+ // Set_optField sets the _optField rule contexts.
+ Set_optField(IOptFieldContext)
+
+ // Set_expr sets the _expr rule contexts.
+ Set_expr(IExprContext)
+
+
+ // GetFields returns the fields rule context list.
+ GetFields() []IOptFieldContext
+
+ // GetValues returns the values rule context list.
+ GetValues() []IExprContext
+
+
+ // SetFields sets the fields rule context list.
+ SetFields([]IOptFieldContext)
+
+ // SetValues sets the values rule context list.
+ SetValues([]IExprContext)
+
+
+ // Getter signatures
+ AllOptField() []IOptFieldContext
+ OptField(i int) IOptFieldContext
+ AllCOLON() []antlr.TerminalNode
+ COLON(i int) antlr.TerminalNode
+ AllExpr() []IExprContext
+ Expr(i int) IExprContext
+ AllCOMMA() []antlr.TerminalNode
+ COMMA(i int) antlr.TerminalNode
+
+ // IsFieldInitializerListContext differentiates from other interfaces.
+ IsFieldInitializerListContext()
+}
+
+type FieldInitializerListContext struct {
+ antlr.BaseParserRuleContext
+ parser antlr.Parser
+ _optField IOptFieldContext
+ fields []IOptFieldContext
+ s21 antlr.Token
+ cols []antlr.Token
+ _expr IExprContext
+ values []IExprContext
+}
+
+func NewEmptyFieldInitializerListContext() *FieldInitializerListContext {
+ var p = new(FieldInitializerListContext)
+ antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
+ p.RuleIndex = CELParserRULE_fieldInitializerList
+ return p
+}
+
+func InitEmptyFieldInitializerListContext(p *FieldInitializerListContext) {
+ antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
+ p.RuleIndex = CELParserRULE_fieldInitializerList
+}
+
+func (*FieldInitializerListContext) IsFieldInitializerListContext() {}
+
+func NewFieldInitializerListContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *FieldInitializerListContext {
+ var p = new(FieldInitializerListContext)
+
+ antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState)
+
+ p.parser = parser
+ p.RuleIndex = CELParserRULE_fieldInitializerList
+
+ return p
+}
+
+func (s *FieldInitializerListContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *FieldInitializerListContext) GetS21() antlr.Token { return s.s21 }
+
+
+func (s *FieldInitializerListContext) SetS21(v antlr.Token) { s.s21 = v }
+
+
+func (s *FieldInitializerListContext) GetCols() []antlr.Token { return s.cols }
+
+
+func (s *FieldInitializerListContext) SetCols(v []antlr.Token) { s.cols = v }
+
+
+func (s *FieldInitializerListContext) Get_optField() IOptFieldContext { return s._optField }
+
+func (s *FieldInitializerListContext) Get_expr() IExprContext { return s._expr }
+
+
+func (s *FieldInitializerListContext) Set_optField(v IOptFieldContext) { s._optField = v }
+
+func (s *FieldInitializerListContext) Set_expr(v IExprContext) { s._expr = v }
+
+
+func (s *FieldInitializerListContext) GetFields() []IOptFieldContext { return s.fields }
+
+func (s *FieldInitializerListContext) GetValues() []IExprContext { return s.values }
+
+
+func (s *FieldInitializerListContext) SetFields(v []IOptFieldContext) { s.fields = v }
+
+func (s *FieldInitializerListContext) SetValues(v []IExprContext) { s.values = v }
+
+
+func (s *FieldInitializerListContext) AllOptField() []IOptFieldContext {
+ children := s.GetChildren()
+ len := 0
+ for _, ctx := range children {
+ if _, ok := ctx.(IOptFieldContext); ok {
+ len++
+ }
+ }
+
+ tst := make([]IOptFieldContext, len)
+ i := 0
+ for _, ctx := range children {
+ if t, ok := ctx.(IOptFieldContext); ok {
+ tst[i] = t.(IOptFieldContext)
+ i++
+ }
+ }
+
+ return tst
+}
+
+func (s *FieldInitializerListContext) OptField(i int) IOptFieldContext {
+	var t antlr.RuleContext
+ j := 0
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IOptFieldContext); ok {
+ if j == i {
+				t = ctx.(antlr.RuleContext)
+ break
+ }
+ j++
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IOptFieldContext)
+}
+
+func (s *FieldInitializerListContext) AllCOLON() []antlr.TerminalNode {
+ return s.GetTokens(CELParserCOLON)
+}
+
+func (s *FieldInitializerListContext) COLON(i int) antlr.TerminalNode {
+ return s.GetToken(CELParserCOLON, i)
+}
+
+func (s *FieldInitializerListContext) AllExpr() []IExprContext {
+ children := s.GetChildren()
+ len := 0
+ for _, ctx := range children {
+ if _, ok := ctx.(IExprContext); ok {
+ len++
+ }
+ }
+
+ tst := make([]IExprContext, len)
+ i := 0
+ for _, ctx := range children {
+ if t, ok := ctx.(IExprContext); ok {
+ tst[i] = t.(IExprContext)
+ i++
+ }
+ }
+
+ return tst
+}
+
+func (s *FieldInitializerListContext) Expr(i int) IExprContext {
+	var t antlr.RuleContext
+ j := 0
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IExprContext); ok {
+ if j == i {
+				t = ctx.(antlr.RuleContext)
+ break
+ }
+ j++
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IExprContext)
+}
+
+func (s *FieldInitializerListContext) AllCOMMA() []antlr.TerminalNode {
+ return s.GetTokens(CELParserCOMMA)
+}
+
+func (s *FieldInitializerListContext) COMMA(i int) antlr.TerminalNode {
+ return s.GetToken(CELParserCOMMA, i)
+}
+
+func (s *FieldInitializerListContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *FieldInitializerListContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+ return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+
+func (s *FieldInitializerListContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterFieldInitializerList(s)
+ }
+}
+
+func (s *FieldInitializerListContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitFieldInitializerList(s)
+ }
+}
+
+func (s *FieldInitializerListContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitFieldInitializerList(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+
+
+
+func (p *CELParser) FieldInitializerList() (localctx IFieldInitializerListContext) {
+ localctx = NewFieldInitializerListContext(p, p.GetParserRuleContext(), p.GetState())
+ p.EnterRule(localctx, 22, CELParserRULE_fieldInitializerList)
+ var _alt int
+
+ p.EnterOuterAlt(localctx, 1)
+ {
+ p.SetState(198)
+
+ var _x = p.OptField()
+
+
+ localctx.(*FieldInitializerListContext)._optField = _x
+ }
+ localctx.(*FieldInitializerListContext).fields = append(localctx.(*FieldInitializerListContext).fields, localctx.(*FieldInitializerListContext)._optField)
+ {
+ p.SetState(199)
+
+ var _m = p.Match(CELParserCOLON)
+
+ localctx.(*FieldInitializerListContext).s21 = _m
+ if p.HasError() {
+ // Recognition error - abort rule
+ goto errorExit
+ }
+ }
+ localctx.(*FieldInitializerListContext).cols = append(localctx.(*FieldInitializerListContext).cols, localctx.(*FieldInitializerListContext).s21)
+ {
+ p.SetState(200)
+
+ var _x = p.Expr()
+
+
+ localctx.(*FieldInitializerListContext)._expr = _x
+ }
+ localctx.(*FieldInitializerListContext).values = append(localctx.(*FieldInitializerListContext).values, localctx.(*FieldInitializerListContext)._expr)
+ p.SetState(208)
+ p.GetErrorHandler().Sync(p)
+ if p.HasError() {
+ goto errorExit
+ }
+ _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 28, p.GetParserRuleContext())
+ if p.HasError() {
+ goto errorExit
+ }
+ for _alt != 2 && _alt != antlr.ATNInvalidAltNumber {
+ if _alt == 1 {
+ {
+ p.SetState(201)
+ p.Match(CELParserCOMMA)
+ if p.HasError() {
+ // Recognition error - abort rule
+ goto errorExit
+ }
+ }
+ {
+ p.SetState(202)
+
+ var _x = p.OptField()
+
+
+ localctx.(*FieldInitializerListContext)._optField = _x
+ }
+ localctx.(*FieldInitializerListContext).fields = append(localctx.(*FieldInitializerListContext).fields, localctx.(*FieldInitializerListContext)._optField)
+ {
+ p.SetState(203)
+
+ var _m = p.Match(CELParserCOLON)
+
+ localctx.(*FieldInitializerListContext).s21 = _m
+ if p.HasError() {
+ // Recognition error - abort rule
+ goto errorExit
+ }
+ }
+ localctx.(*FieldInitializerListContext).cols = append(localctx.(*FieldInitializerListContext).cols, localctx.(*FieldInitializerListContext).s21)
+ {
+ p.SetState(204)
+
+ var _x = p.Expr()
+
+
+ localctx.(*FieldInitializerListContext)._expr = _x
+ }
+ localctx.(*FieldInitializerListContext).values = append(localctx.(*FieldInitializerListContext).values, localctx.(*FieldInitializerListContext)._expr)
+
+
+ }
+ p.SetState(210)
+ p.GetErrorHandler().Sync(p)
+ if p.HasError() {
+ goto errorExit
+ }
+ _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 28, p.GetParserRuleContext())
+ if p.HasError() {
+ goto errorExit
+ }
+ }
+
+
+
+errorExit:
+ if p.HasError() {
+ v := p.GetError()
+ localctx.SetException(v)
+ p.GetErrorHandler().ReportError(p, v)
+ p.GetErrorHandler().Recover(p, v)
+ p.SetError(nil)
+ }
+ p.ExitRule()
+ return localctx
+ goto errorExit // Trick to prevent compiler error if the label is not used
+}
+
+
+// IOptFieldContext is an interface to support dynamic dispatch.
+type IOptFieldContext interface {
+ antlr.ParserRuleContext
+
+ // GetParser returns the parser.
+ GetParser() antlr.Parser
+
+ // GetOpt returns the opt token.
+ GetOpt() antlr.Token
+
+
+ // SetOpt sets the opt token.
+ SetOpt(antlr.Token)
+
+
+ // Getter signatures
+ IDENTIFIER() antlr.TerminalNode
+ QUESTIONMARK() antlr.TerminalNode
+
+ // IsOptFieldContext differentiates from other interfaces.
+ IsOptFieldContext()
+}
+
+type OptFieldContext struct {
+ antlr.BaseParserRuleContext
+ parser antlr.Parser
+ opt antlr.Token
+}
+
+func NewEmptyOptFieldContext() *OptFieldContext {
+ var p = new(OptFieldContext)
+ antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
+ p.RuleIndex = CELParserRULE_optField
+ return p
+}
+
+func InitEmptyOptFieldContext(p *OptFieldContext) {
+ antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
+ p.RuleIndex = CELParserRULE_optField
+}
+
+func (*OptFieldContext) IsOptFieldContext() {}
+
+func NewOptFieldContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *OptFieldContext {
+ var p = new(OptFieldContext)
+
+ antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState)
+
+ p.parser = parser
+ p.RuleIndex = CELParserRULE_optField
+
+ return p
+}
+
+func (s *OptFieldContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *OptFieldContext) GetOpt() antlr.Token { return s.opt }
+
+
+func (s *OptFieldContext) SetOpt(v antlr.Token) { s.opt = v }
+
+
+func (s *OptFieldContext) IDENTIFIER() antlr.TerminalNode {
+ return s.GetToken(CELParserIDENTIFIER, 0)
+}
+
+func (s *OptFieldContext) QUESTIONMARK() antlr.TerminalNode {
+ return s.GetToken(CELParserQUESTIONMARK, 0)
+}
+
+func (s *OptFieldContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *OptFieldContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+ return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+
+func (s *OptFieldContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterOptField(s)
+ }
+}
+
+func (s *OptFieldContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitOptField(s)
+ }
+}
+
+func (s *OptFieldContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitOptField(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+
+
+
+func (p *CELParser) OptField() (localctx IOptFieldContext) {
+ localctx = NewOptFieldContext(p, p.GetParserRuleContext(), p.GetState())
+ p.EnterRule(localctx, 24, CELParserRULE_optField)
+ var _la int
+
+ p.EnterOuterAlt(localctx, 1)
+ p.SetState(212)
+ p.GetErrorHandler().Sync(p)
+ if p.HasError() {
+ goto errorExit
+ }
+ _la = p.GetTokenStream().LA(1)
+
+
+ if _la == CELParserQUESTIONMARK {
+ {
+ p.SetState(211)
+
+ var _m = p.Match(CELParserQUESTIONMARK)
+
+ localctx.(*OptFieldContext).opt = _m
+ if p.HasError() {
+ // Recognition error - abort rule
+ goto errorExit
+ }
+ }
+
+ }
+ {
+ p.SetState(214)
+ p.Match(CELParserIDENTIFIER)
+ if p.HasError() {
+ // Recognition error - abort rule
+ goto errorExit
+ }
+ }
+
+
+
+errorExit:
+ if p.HasError() {
+ v := p.GetError()
+ localctx.SetException(v)
+ p.GetErrorHandler().ReportError(p, v)
+ p.GetErrorHandler().Recover(p, v)
+ p.SetError(nil)
+ }
+ p.ExitRule()
+ return localctx
+ goto errorExit // Trick to prevent compiler error if the label is not used
+}
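
The `opt` label above is only assigned when the QUESTIONMARK branch matches, which gives downstream code a cheap presence test for optional-field syntax. A hedged sketch (hypothetical helper, not part of this diff):

```go
package exprutil // hypothetical helper package, for illustration only

import gen "github.com/google/cel-go/parser/gen"

// isOptionalField reports whether optField matched a leading '?'.
// OptField() leaves opt nil for a plain identifier field, so a nil
// check on the label distinguishes `?field` from `field`.
func isOptionalField(ctx gen.IOptFieldContext) bool {
	return ctx.GetOpt() != nil
}
```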
+
+
+// IMapInitializerListContext is an interface to support dynamic dispatch.
+type IMapInitializerListContext interface {
+ antlr.ParserRuleContext
+
+ // GetParser returns the parser.
+ GetParser() antlr.Parser
+
+ // GetS21 returns the s21 token.
+ GetS21() antlr.Token
+
+
+ // SetS21 sets the s21 token.
+ SetS21(antlr.Token)
+
+
+ // GetCols returns the cols token list.
+ GetCols() []antlr.Token
+
+
+ // SetCols sets the cols token list.
+ SetCols([]antlr.Token)
+
+
+ // Get_optExpr returns the _optExpr rule contexts.
+ Get_optExpr() IOptExprContext
+
+ // Get_expr returns the _expr rule contexts.
+ Get_expr() IExprContext
+
+
+ // Set_optExpr sets the _optExpr rule contexts.
+ Set_optExpr(IOptExprContext)
+
+ // Set_expr sets the _expr rule contexts.
+ Set_expr(IExprContext)
+
+
+ // GetKeys returns the keys rule context list.
+ GetKeys() []IOptExprContext
+
+ // GetValues returns the values rule context list.
+ GetValues() []IExprContext
+
+
+ // SetKeys sets the keys rule context list.
+ SetKeys([]IOptExprContext)
+
+ // SetValues sets the values rule context list.
+ SetValues([]IExprContext)
+
+
+ // Getter signatures
+ AllOptExpr() []IOptExprContext
+ OptExpr(i int) IOptExprContext
+ AllCOLON() []antlr.TerminalNode
+ COLON(i int) antlr.TerminalNode
+ AllExpr() []IExprContext
+ Expr(i int) IExprContext
+ AllCOMMA() []antlr.TerminalNode
+ COMMA(i int) antlr.TerminalNode
+
+ // IsMapInitializerListContext differentiates from other interfaces.
+ IsMapInitializerListContext()
+}
+
+type MapInitializerListContext struct {
+ antlr.BaseParserRuleContext
+ parser antlr.Parser
+ _optExpr IOptExprContext
+ keys []IOptExprContext
+ s21 antlr.Token
+ cols []antlr.Token
+ _expr IExprContext
+ values []IExprContext
+}
+
+func NewEmptyMapInitializerListContext() *MapInitializerListContext {
+ var p = new(MapInitializerListContext)
+ antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
+ p.RuleIndex = CELParserRULE_mapInitializerList
+ return p
+}
+
+func InitEmptyMapInitializerListContext(p *MapInitializerListContext) {
+ antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
+ p.RuleIndex = CELParserRULE_mapInitializerList
+}
+
+func (*MapInitializerListContext) IsMapInitializerListContext() {}
+
+func NewMapInitializerListContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *MapInitializerListContext {
+ var p = new(MapInitializerListContext)
+
+ antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState)
+
+ p.parser = parser
+ p.RuleIndex = CELParserRULE_mapInitializerList
+
+ return p
+}
+
+func (s *MapInitializerListContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *MapInitializerListContext) GetS21() antlr.Token { return s.s21 }
+
+
+func (s *MapInitializerListContext) SetS21(v antlr.Token) { s.s21 = v }
+
+
+func (s *MapInitializerListContext) GetCols() []antlr.Token { return s.cols }
+
+
+func (s *MapInitializerListContext) SetCols(v []antlr.Token) { s.cols = v }
+
+
+func (s *MapInitializerListContext) Get_optExpr() IOptExprContext { return s._optExpr }
+
+func (s *MapInitializerListContext) Get_expr() IExprContext { return s._expr }
+
+
+func (s *MapInitializerListContext) Set_optExpr(v IOptExprContext) { s._optExpr = v }
+
+func (s *MapInitializerListContext) Set_expr(v IExprContext) { s._expr = v }
+
+
+func (s *MapInitializerListContext) GetKeys() []IOptExprContext { return s.keys }
+
+func (s *MapInitializerListContext) GetValues() []IExprContext { return s.values }
+
+
+func (s *MapInitializerListContext) SetKeys(v []IOptExprContext) { s.keys = v }
+
+func (s *MapInitializerListContext) SetValues(v []IExprContext) { s.values = v }
+
+
+func (s *MapInitializerListContext) AllOptExpr() []IOptExprContext {
+ children := s.GetChildren()
+ len := 0
+ for _, ctx := range children {
+ if _, ok := ctx.(IOptExprContext); ok {
+ len++
+ }
+ }
+
+ tst := make([]IOptExprContext, len)
+ i := 0
+ for _, ctx := range children {
+ if t, ok := ctx.(IOptExprContext); ok {
+ tst[i] = t.(IOptExprContext)
+ i++
+ }
+ }
+
+ return tst
+}
+
+func (s *MapInitializerListContext) OptExpr(i int) IOptExprContext {
+	var t antlr.RuleContext
+ j := 0
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IOptExprContext); ok {
+ if j == i {
+				t = ctx.(antlr.RuleContext)
+ break
+ }
+ j++
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IOptExprContext)
+}
+
+func (s *MapInitializerListContext) AllCOLON() []antlr.TerminalNode {
+ return s.GetTokens(CELParserCOLON)
+}
+
+func (s *MapInitializerListContext) COLON(i int) antlr.TerminalNode {
+ return s.GetToken(CELParserCOLON, i)
+}
+
+func (s *MapInitializerListContext) AllExpr() []IExprContext {
+ children := s.GetChildren()
+ len := 0
+ for _, ctx := range children {
+ if _, ok := ctx.(IExprContext); ok {
+ len++
+ }
+ }
+
+ tst := make([]IExprContext, len)
+ i := 0
+ for _, ctx := range children {
+ if t, ok := ctx.(IExprContext); ok {
+ tst[i] = t.(IExprContext)
+ i++
+ }
+ }
+
+ return tst
+}
+
+func (s *MapInitializerListContext) Expr(i int) IExprContext {
+	var t antlr.RuleContext
+ j := 0
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IExprContext); ok {
+ if j == i {
+				t = ctx.(antlr.RuleContext)
+ break
+ }
+ j++
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IExprContext)
+}
+
+func (s *MapInitializerListContext) AllCOMMA() []antlr.TerminalNode {
+ return s.GetTokens(CELParserCOMMA)
+}
+
+func (s *MapInitializerListContext) COMMA(i int) antlr.TerminalNode {
+ return s.GetToken(CELParserCOMMA, i)
+}
+
+func (s *MapInitializerListContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *MapInitializerListContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+ return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+
+func (s *MapInitializerListContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterMapInitializerList(s)
+ }
+}
+
+func (s *MapInitializerListContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitMapInitializerList(s)
+ }
+}
+
+func (s *MapInitializerListContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitMapInitializerList(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+
+
+
+func (p *CELParser) MapInitializerList() (localctx IMapInitializerListContext) {
+ localctx = NewMapInitializerListContext(p, p.GetParserRuleContext(), p.GetState())
+ p.EnterRule(localctx, 26, CELParserRULE_mapInitializerList)
+ var _alt int
+
+ p.EnterOuterAlt(localctx, 1)
+ {
+ p.SetState(216)
+
+ var _x = p.OptExpr()
+
+
+ localctx.(*MapInitializerListContext)._optExpr = _x
+ }
+ localctx.(*MapInitializerListContext).keys = append(localctx.(*MapInitializerListContext).keys, localctx.(*MapInitializerListContext)._optExpr)
+ {
+ p.SetState(217)
+
+ var _m = p.Match(CELParserCOLON)
+
+ localctx.(*MapInitializerListContext).s21 = _m
+ if p.HasError() {
+ // Recognition error - abort rule
+ goto errorExit
+ }
+ }
+ localctx.(*MapInitializerListContext).cols = append(localctx.(*MapInitializerListContext).cols, localctx.(*MapInitializerListContext).s21)
+ {
+ p.SetState(218)
+
+ var _x = p.Expr()
+
+
+ localctx.(*MapInitializerListContext)._expr = _x
+ }
+ localctx.(*MapInitializerListContext).values = append(localctx.(*MapInitializerListContext).values, localctx.(*MapInitializerListContext)._expr)
+ p.SetState(226)
+ p.GetErrorHandler().Sync(p)
+ if p.HasError() {
+ goto errorExit
+ }
+ _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 30, p.GetParserRuleContext())
+ if p.HasError() {
+ goto errorExit
+ }
+ for _alt != 2 && _alt != antlr.ATNInvalidAltNumber {
+ if _alt == 1 {
+ {
+ p.SetState(219)
+ p.Match(CELParserCOMMA)
+ if p.HasError() {
+ // Recognition error - abort rule
+ goto errorExit
+ }
+ }
+ {
+ p.SetState(220)
+
+ var _x = p.OptExpr()
+
+
+ localctx.(*MapInitializerListContext)._optExpr = _x
+ }
+ localctx.(*MapInitializerListContext).keys = append(localctx.(*MapInitializerListContext).keys, localctx.(*MapInitializerListContext)._optExpr)
+ {
+ p.SetState(221)
+
+ var _m = p.Match(CELParserCOLON)
+
+ localctx.(*MapInitializerListContext).s21 = _m
+ if p.HasError() {
+ // Recognition error - abort rule
+ goto errorExit
+ }
+ }
+ localctx.(*MapInitializerListContext).cols = append(localctx.(*MapInitializerListContext).cols, localctx.(*MapInitializerListContext).s21)
+ {
+ p.SetState(222)
+
+ var _x = p.Expr()
+
+
+ localctx.(*MapInitializerListContext)._expr = _x
+ }
+ localctx.(*MapInitializerListContext).values = append(localctx.(*MapInitializerListContext).values, localctx.(*MapInitializerListContext)._expr)
+
+
+ }
+ p.SetState(228)
+ p.GetErrorHandler().Sync(p)
+ if p.HasError() {
+ goto errorExit
+ }
+ _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 30, p.GetParserRuleContext())
+ if p.HasError() {
+ goto errorExit
+ }
+ }
+
+
+
+errorExit:
+ if p.HasError() {
+ v := p.GetError()
+ localctx.SetException(v)
+ p.GetErrorHandler().ReportError(p, v)
+ p.GetErrorHandler().Recover(p, v)
+ p.SetError(nil)
+ }
+ p.ExitRule()
+ return localctx
+ goto errorExit // Trick to prevent compiler error if the label is not used
+}
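
MapInitializerList() grows the `keys` and `values` label slices in lock-step, one element per `key : value` pair, so either length gives the entry count. A short sketch of that invariant in use (hypothetical helper, not part of this diff):

```go
package exprutil // hypothetical helper package, for illustration only

import gen "github.com/google/cel-go/parser/gen"

// mapEntryCount relies on the parallel label slices built above:
// keys[i] pairs with values[i], so the lengths match on a clean parse.
func mapEntryCount(ctx gen.IMapInitializerListContext) int {
	return len(ctx.GetKeys()) // == len(ctx.GetValues()) after a clean parse
}
```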
+
+
+// IOptExprContext is an interface to support dynamic dispatch.
+type IOptExprContext interface {
+ antlr.ParserRuleContext
+
+ // GetParser returns the parser.
+ GetParser() antlr.Parser
+
+ // GetOpt returns the opt token.
+ GetOpt() antlr.Token
+
+
+ // SetOpt sets the opt token.
+ SetOpt(antlr.Token)
+
+
+ // GetE returns the e rule contexts.
+ GetE() IExprContext
+
+
+ // SetE sets the e rule contexts.
+ SetE(IExprContext)
+
+
+ // Getter signatures
+ Expr() IExprContext
+ QUESTIONMARK() antlr.TerminalNode
+
+ // IsOptExprContext differentiates from other interfaces.
+ IsOptExprContext()
+}
+
+type OptExprContext struct {
+ antlr.BaseParserRuleContext
+ parser antlr.Parser
+ opt antlr.Token
+ e IExprContext
+}
+
+func NewEmptyOptExprContext() *OptExprContext {
+ var p = new(OptExprContext)
+ antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
+ p.RuleIndex = CELParserRULE_optExpr
+ return p
+}
+
+func InitEmptyOptExprContext(p *OptExprContext) {
+ antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
+ p.RuleIndex = CELParserRULE_optExpr
+}
+
+func (*OptExprContext) IsOptExprContext() {}
+
+func NewOptExprContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *OptExprContext {
+ var p = new(OptExprContext)
+
+ antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState)
+
+ p.parser = parser
+ p.RuleIndex = CELParserRULE_optExpr
+
+ return p
+}
+
+func (s *OptExprContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *OptExprContext) GetOpt() antlr.Token { return s.opt }
+
+
+func (s *OptExprContext) SetOpt(v antlr.Token) { s.opt = v }
+
+
+func (s *OptExprContext) GetE() IExprContext { return s.e }
+
+
+func (s *OptExprContext) SetE(v IExprContext) { s.e = v }
+
+
+func (s *OptExprContext) Expr() IExprContext {
+	var t antlr.RuleContext
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IExprContext); ok {
+			t = ctx.(antlr.RuleContext)
+ break
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IExprContext)
+}
+
+func (s *OptExprContext) QUESTIONMARK() antlr.TerminalNode {
+ return s.GetToken(CELParserQUESTIONMARK, 0)
+}
+
+func (s *OptExprContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *OptExprContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+ return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+
+func (s *OptExprContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterOptExpr(s)
+ }
+}
+
+func (s *OptExprContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitOptExpr(s)
+ }
+}
+
+func (s *OptExprContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitOptExpr(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+
+
+
+func (p *CELParser) OptExpr() (localctx IOptExprContext) {
+ localctx = NewOptExprContext(p, p.GetParserRuleContext(), p.GetState())
+ p.EnterRule(localctx, 28, CELParserRULE_optExpr)
+ var _la int
+
+ p.EnterOuterAlt(localctx, 1)
+ p.SetState(230)
+ p.GetErrorHandler().Sync(p)
+ if p.HasError() {
+ goto errorExit
+ }
+ _la = p.GetTokenStream().LA(1)
+
+
+ if _la == CELParserQUESTIONMARK {
+ {
+ p.SetState(229)
+
+ var _m = p.Match(CELParserQUESTIONMARK)
+
+ localctx.(*OptExprContext).opt = _m
+ if p.HasError() {
+ // Recognition error - abort rule
+ goto errorExit
+ }
+ }
+
+ }
+ {
+ p.SetState(232)
+
+ var _x = p.Expr()
+
+
+ localctx.(*OptExprContext).e = _x
+ }
+
+
+
+errorExit:
+ if p.HasError() {
+ v := p.GetError()
+ localctx.SetException(v)
+ p.GetErrorHandler().ReportError(p, v)
+ p.GetErrorHandler().Recover(p, v)
+ p.SetError(nil)
+ }
+ p.ExitRule()
+ return localctx
+ goto errorExit // Trick to prevent compiler error if the label is not used
+}
+
+
+// ILiteralContext is an interface to support dynamic dispatch.
+type ILiteralContext interface {
+ antlr.ParserRuleContext
+
+ // GetParser returns the parser.
+ GetParser() antlr.Parser
+ // IsLiteralContext differentiates from other interfaces.
+ IsLiteralContext()
+}
+
+type LiteralContext struct {
+ antlr.BaseParserRuleContext
+ parser antlr.Parser
+}
+
+func NewEmptyLiteralContext() *LiteralContext {
+ var p = new(LiteralContext)
+ antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
+ p.RuleIndex = CELParserRULE_literal
+ return p
+}
+
+func InitEmptyLiteralContext(p *LiteralContext) {
+ antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
+ p.RuleIndex = CELParserRULE_literal
+}
+
+func (*LiteralContext) IsLiteralContext() {}
+
+func NewLiteralContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *LiteralContext {
+ var p = new(LiteralContext)
+
+ antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState)
+
+ p.parser = parser
+ p.RuleIndex = CELParserRULE_literal
+
+ return p
+}
+
+func (s *LiteralContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *LiteralContext) CopyAll(ctx *LiteralContext) {
+ s.CopyFrom(&ctx.BaseParserRuleContext)
+}
+
+func (s *LiteralContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *LiteralContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+ return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+
+
+
+type BytesContext struct {
+ LiteralContext
+ tok antlr.Token
+}
+
+func NewBytesContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *BytesContext {
+ var p = new(BytesContext)
+
+ InitEmptyLiteralContext(&p.LiteralContext)
+ p.parser = parser
+ p.CopyAll(ctx.(*LiteralContext))
+
+ return p
+}
+
+
+func (s *BytesContext) GetTok() antlr.Token { return s.tok }
+
+
+func (s *BytesContext) SetTok(v antlr.Token) { s.tok = v }
+
+func (s *BytesContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *BytesContext) BYTES() antlr.TerminalNode {
+ return s.GetToken(CELParserBYTES, 0)
+}
+
+
+func (s *BytesContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterBytes(s)
+ }
+}
+
+func (s *BytesContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitBytes(s)
+ }
+}
+
+func (s *BytesContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitBytes(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+
+type UintContext struct {
+ LiteralContext
+ tok antlr.Token
+}
+
+func NewUintContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *UintContext {
+ var p = new(UintContext)
+
+ InitEmptyLiteralContext(&p.LiteralContext)
+ p.parser = parser
+ p.CopyAll(ctx.(*LiteralContext))
+
+ return p
+}
+
+
+func (s *UintContext) GetTok() antlr.Token { return s.tok }
+
+
+func (s *UintContext) SetTok(v antlr.Token) { s.tok = v }
+
+func (s *UintContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *UintContext) NUM_UINT() antlr.TerminalNode {
+ return s.GetToken(CELParserNUM_UINT, 0)
+}
+
+
+func (s *UintContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterUint(s)
+ }
+}
+
+func (s *UintContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitUint(s)
+ }
+}
+
+func (s *UintContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitUint(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+
+type NullContext struct {
+ LiteralContext
+ tok antlr.Token
+}
+
+func NewNullContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *NullContext {
+ var p = new(NullContext)
+
+ InitEmptyLiteralContext(&p.LiteralContext)
+ p.parser = parser
+ p.CopyAll(ctx.(*LiteralContext))
+
+ return p
+}
+
+
+func (s *NullContext) GetTok() antlr.Token { return s.tok }
+
+
+func (s *NullContext) SetTok(v antlr.Token) { s.tok = v }
+
+func (s *NullContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *NullContext) NUL() antlr.TerminalNode {
+ return s.GetToken(CELParserNUL, 0)
+}
+
+
+func (s *NullContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterNull(s)
+ }
+}
+
+func (s *NullContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitNull(s)
+ }
+}
+
+func (s *NullContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitNull(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+
+type BoolFalseContext struct {
+ LiteralContext
+ tok antlr.Token
+}
+
+func NewBoolFalseContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *BoolFalseContext {
+ var p = new(BoolFalseContext)
+
+ InitEmptyLiteralContext(&p.LiteralContext)
+ p.parser = parser
+ p.CopyAll(ctx.(*LiteralContext))
+
+ return p
+}
+
+
+func (s *BoolFalseContext) GetTok() antlr.Token { return s.tok }
+
+
+func (s *BoolFalseContext) SetTok(v antlr.Token) { s.tok = v }
+
+func (s *BoolFalseContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *BoolFalseContext) CEL_FALSE() antlr.TerminalNode {
+ return s.GetToken(CELParserCEL_FALSE, 0)
+}
+
+
+func (s *BoolFalseContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterBoolFalse(s)
+ }
+}
+
+func (s *BoolFalseContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitBoolFalse(s)
+ }
+}
+
+func (s *BoolFalseContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitBoolFalse(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+
+type StringContext struct {
+ LiteralContext
+ tok antlr.Token
+}
+
+func NewStringContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *StringContext {
+ var p = new(StringContext)
+
+ InitEmptyLiteralContext(&p.LiteralContext)
+ p.parser = parser
+ p.CopyAll(ctx.(*LiteralContext))
+
+ return p
+}
+
+
+func (s *StringContext) GetTok() antlr.Token { return s.tok }
+
+
+func (s *StringContext) SetTok(v antlr.Token) { s.tok = v }
+
+func (s *StringContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *StringContext) STRING() antlr.TerminalNode {
+ return s.GetToken(CELParserSTRING, 0)
+}
+
+
+func (s *StringContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterString(s)
+ }
+}
+
+func (s *StringContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitString(s)
+ }
+}
+
+func (s *StringContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitString(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+
+type DoubleContext struct {
+ LiteralContext
+ sign antlr.Token
+ tok antlr.Token
+}
+
+func NewDoubleContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *DoubleContext {
+ var p = new(DoubleContext)
+
+ InitEmptyLiteralContext(&p.LiteralContext)
+ p.parser = parser
+ p.CopyAll(ctx.(*LiteralContext))
+
+ return p
+}
+
+
+func (s *DoubleContext) GetSign() antlr.Token { return s.sign }
+
+func (s *DoubleContext) GetTok() antlr.Token { return s.tok }
+
+
+func (s *DoubleContext) SetSign(v antlr.Token) { s.sign = v }
+
+func (s *DoubleContext) SetTok(v antlr.Token) { s.tok = v }
+
+func (s *DoubleContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *DoubleContext) NUM_FLOAT() antlr.TerminalNode {
+ return s.GetToken(CELParserNUM_FLOAT, 0)
+}
+
+func (s *DoubleContext) MINUS() antlr.TerminalNode {
+ return s.GetToken(CELParserMINUS, 0)
+}
+
+
+func (s *DoubleContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterDouble(s)
+ }
+}
+
+func (s *DoubleContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitDouble(s)
+ }
+}
+
+func (s *DoubleContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitDouble(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+
+type BoolTrueContext struct {
+ LiteralContext
+ tok antlr.Token
+}
+
+func NewBoolTrueContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *BoolTrueContext {
+ var p = new(BoolTrueContext)
+
+ InitEmptyLiteralContext(&p.LiteralContext)
+ p.parser = parser
+ p.CopyAll(ctx.(*LiteralContext))
+
+ return p
+}
+
+
+func (s *BoolTrueContext) GetTok() antlr.Token { return s.tok }
+
+
+func (s *BoolTrueContext) SetTok(v antlr.Token) { s.tok = v }
+
+func (s *BoolTrueContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *BoolTrueContext) CEL_TRUE() antlr.TerminalNode {
+ return s.GetToken(CELParserCEL_TRUE, 0)
+}
+
+
+func (s *BoolTrueContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterBoolTrue(s)
+ }
+}
+
+func (s *BoolTrueContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitBoolTrue(s)
+ }
+}
+
+func (s *BoolTrueContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitBoolTrue(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+
+type IntContext struct {
+ LiteralContext
+ sign antlr.Token
+ tok antlr.Token
+}
+
+func NewIntContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *IntContext {
+ var p = new(IntContext)
+
+ InitEmptyLiteralContext(&p.LiteralContext)
+ p.parser = parser
+ p.CopyAll(ctx.(*LiteralContext))
+
+ return p
+}
+
+
+func (s *IntContext) GetSign() antlr.Token { return s.sign }
+
+func (s *IntContext) GetTok() antlr.Token { return s.tok }
+
+
+func (s *IntContext) SetSign(v antlr.Token) { s.sign = v }
+
+func (s *IntContext) SetTok(v antlr.Token) { s.tok = v }
+
+func (s *IntContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *IntContext) NUM_INT() antlr.TerminalNode {
+ return s.GetToken(CELParserNUM_INT, 0)
+}
+
+func (s *IntContext) MINUS() antlr.TerminalNode {
+ return s.GetToken(CELParserMINUS, 0)
+}
+
+
+func (s *IntContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterInt(s)
+ }
+}
+
+func (s *IntContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitInt(s)
+ }
+}
+
+func (s *IntContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitInt(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+
+
+func (p *CELParser) Literal() (localctx ILiteralContext) {
+ localctx = NewLiteralContext(p, p.GetParserRuleContext(), p.GetState())
+ p.EnterRule(localctx, 30, CELParserRULE_literal)
+ var _la int
+
+ p.SetState(248)
+ p.GetErrorHandler().Sync(p)
+ if p.HasError() {
+ goto errorExit
+ }
+
+ switch p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 34, p.GetParserRuleContext()) {
+ case 1:
+ localctx = NewIntContext(p, localctx)
+ p.EnterOuterAlt(localctx, 1)
+ p.SetState(235)
+ p.GetErrorHandler().Sync(p)
+ if p.HasError() {
+ goto errorExit
+ }
+ _la = p.GetTokenStream().LA(1)
+
+
+ if _la == CELParserMINUS {
+ {
+ p.SetState(234)
+
+ var _m = p.Match(CELParserMINUS)
+
+ localctx.(*IntContext).sign = _m
+ if p.HasError() {
+ // Recognition error - abort rule
+ goto errorExit
+ }
+ }
+
+ }
+ {
+ p.SetState(237)
+
+ var _m = p.Match(CELParserNUM_INT)
+
+ localctx.(*IntContext).tok = _m
+ if p.HasError() {
+ // Recognition error - abort rule
+ goto errorExit
+ }
+ }
+
+
+ case 2:
+ localctx = NewUintContext(p, localctx)
+ p.EnterOuterAlt(localctx, 2)
+ {
+ p.SetState(238)
+
+ var _m = p.Match(CELParserNUM_UINT)
+
+ localctx.(*UintContext).tok = _m
+ if p.HasError() {
+ // Recognition error - abort rule
+ goto errorExit
+ }
+ }
+
+
+ case 3:
+ localctx = NewDoubleContext(p, localctx)
+ p.EnterOuterAlt(localctx, 3)
+ p.SetState(240)
+ p.GetErrorHandler().Sync(p)
+ if p.HasError() {
+ goto errorExit
+ }
+ _la = p.GetTokenStream().LA(1)
+
+
+ if _la == CELParserMINUS {
+ {
+ p.SetState(239)
+
+ var _m = p.Match(CELParserMINUS)
+
+ localctx.(*DoubleContext).sign = _m
+ if p.HasError() {
+ // Recognition error - abort rule
+ goto errorExit
+ }
+ }
+
+ }
+ {
+ p.SetState(242)
+
+ var _m = p.Match(CELParserNUM_FLOAT)
+
+ localctx.(*DoubleContext).tok = _m
+ if p.HasError() {
+ // Recognition error - abort rule
+ goto errorExit
+ }
+ }
+
+
+ case 4:
+ localctx = NewStringContext(p, localctx)
+ p.EnterOuterAlt(localctx, 4)
+ {
+ p.SetState(243)
+
+ var _m = p.Match(CELParserSTRING)
+
+ localctx.(*StringContext).tok = _m
+ if p.HasError() {
+ // Recognition error - abort rule
+ goto errorExit
+ }
+ }
+
+
+ case 5:
+ localctx = NewBytesContext(p, localctx)
+ p.EnterOuterAlt(localctx, 5)
+ {
+ p.SetState(244)
+
+ var _m = p.Match(CELParserBYTES)
+
+ localctx.(*BytesContext).tok = _m
+ if p.HasError() {
+ // Recognition error - abort rule
+ goto errorExit
+ }
+ }
+
+
+ case 6:
+ localctx = NewBoolTrueContext(p, localctx)
+ p.EnterOuterAlt(localctx, 6)
+ {
+ p.SetState(245)
+
+ var _m = p.Match(CELParserCEL_TRUE)
+
+ localctx.(*BoolTrueContext).tok = _m
+ if p.HasError() {
+ // Recognition error - abort rule
+ goto errorExit
+ }
+ }
+
+
+ case 7:
+ localctx = NewBoolFalseContext(p, localctx)
+ p.EnterOuterAlt(localctx, 7)
+ {
+ p.SetState(246)
+
+ var _m = p.Match(CELParserCEL_FALSE)
+
+ localctx.(*BoolFalseContext).tok = _m
+ if p.HasError() {
+ // Recognition error - abort rule
+ goto errorExit
+ }
+ }
+
+
+ case 8:
+ localctx = NewNullContext(p, localctx)
+ p.EnterOuterAlt(localctx, 8)
+ {
+ p.SetState(247)
+
+ var _m = p.Match(CELParserNUL)
+
+ localctx.(*NullContext).tok = _m
+ if p.HasError() {
+ // Recognition error - abort rule
+ goto errorExit
+ }
+ }
+
+ case antlr.ATNInvalidAltNumber:
+ goto errorExit
+ }
+
+
+errorExit:
+ if p.HasError() {
+ v := p.GetError()
+ localctx.SetException(v)
+ p.GetErrorHandler().ReportError(p, v)
+ p.GetErrorHandler().Recover(p, v)
+ p.SetError(nil)
+ }
+ p.ExitRule()
+ return localctx
+ goto errorExit // Trick to prevent compiler error if the label is not used
+}
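
Each alternative of Literal() is materialized as its own context type (IntContext, UintContext, and so on), so consumers typically recover the raw token with a type switch. A hedged sketch of that pattern (hypothetical helper; cel-go's actual conversion lives in the parser package and additionally validates numeric ranges):

```go
package exprutil // hypothetical helper package, for illustration only

import gen "github.com/google/cel-go/parser/gen"

// literalText sketches how the alternative-specific contexts produced
// by Literal() are usually consumed; only a few cases are shown.
func literalText(ctx gen.ILiteralContext) string {
	switch c := ctx.(type) {
	case *gen.IntContext:
		text := c.GetTok().GetText()
		if c.GetSign() != nil {
			text = "-" + text // a MINUS token matched before NUM_INT
		}
		return text
	case *gen.UintContext:
		return c.GetTok().GetText()
	case *gen.StringContext:
		return c.GetTok().GetText() // still quoted and unescaped here
	case *gen.NullContext:
		return "null"
	default:
		return ""
	}
}
```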
+
+
+func (p *CELParser) Sempred(localctx antlr.RuleContext, ruleIndex, predIndex int) bool {
+ switch ruleIndex {
+ case 4:
+ var t *RelationContext = nil
+ if localctx != nil { t = localctx.(*RelationContext) }
+ return p.Relation_Sempred(t, predIndex)
+
+ case 5:
+ var t *CalcContext = nil
+ if localctx != nil { t = localctx.(*CalcContext) }
+ return p.Calc_Sempred(t, predIndex)
+
+ case 7:
+ var t *MemberContext = nil
+ if localctx != nil { t = localctx.(*MemberContext) }
+ return p.Member_Sempred(t, predIndex)
+
+
+ default:
+ panic("No predicate with index: " + fmt.Sprint(ruleIndex))
+ }
+}
+
+func (p *CELParser) Relation_Sempred(localctx antlr.RuleContext, predIndex int) bool {
+ switch predIndex {
+ case 0:
+ return p.Precpred(p.GetParserRuleContext(), 1)
+
+ default:
+ panic("No predicate with index: " + fmt.Sprint(predIndex))
+ }
+}
+
+func (p *CELParser) Calc_Sempred(localctx antlr.RuleContext, predIndex int) bool {
+ switch predIndex {
+ case 1:
+ return p.Precpred(p.GetParserRuleContext(), 2)
+
+ case 2:
+ return p.Precpred(p.GetParserRuleContext(), 1)
+
+ default:
+ panic("No predicate with index: " + fmt.Sprint(predIndex))
+ }
+}
+
+func (p *CELParser) Member_Sempred(localctx antlr.RuleContext, predIndex int) bool {
+ switch predIndex {
+ case 3:
+ return p.Precpred(p.GetParserRuleContext(), 3)
+
+ case 4:
+ return p.Precpred(p.GetParserRuleContext(), 2)
+
+ case 5:
+ return p.Precpred(p.GetParserRuleContext(), 1)
+
+ default:
+ panic("No predicate with index: " + fmt.Sprint(predIndex))
+ }
+}
+
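Sempred is the hook the ATN interpreter calls to evaluate the predicates guarding the left-recursive relation, calc, and member rules; each Precpred call asks whether the pending operator may extend the current left operand at the current precedence level. The generated machinery is opaque, so here is a hand-written sketch of the same precedence-climbing idea (illustrative only; this is not the generated algorithm):

```go
package precdemo // illustrative only

// prec assigns binding power the way Precpred's integer arguments do
// above: higher numbers bind tighter ('*' over '+', '+' over '<').
var prec = map[string]int{"<": 1, "+": 2, "*": 3}

// parseExpr keeps extending the left operand while the next operator
// binds at least as tightly as minPrec - the same question
// p.Precpred(ctx, n) answers for the left-recursive rules. For example,
// parseExpr(strings.Fields("a + b * c < d"), new(int), 0)
// yields "((a + (b * c)) < d)".
func parseExpr(toks []string, pos *int, minPrec int) string {
	left := toks[*pos]
	*pos++
	for *pos < len(toks) {
		op := toks[*pos]
		p, isOp := prec[op]
		if !isOp || p < minPrec {
			break
		}
		*pos++
		right := parseExpr(toks, pos, p+1) // +1 keeps binary operators left-associative
		left = "(" + left + " " + op + " " + right + ")"
	}
	return left
}
```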
diff --git a/vendor/github.com/google/cel-go/parser/gen/cel_visitor.go b/vendor/github.com/google/cel-go/parser/gen/cel_visitor.go
new file mode 100644
index 000000000..d2fbd563a
--- /dev/null
+++ b/vendor/github.com/google/cel-go/parser/gen/cel_visitor.go
@@ -0,0 +1,110 @@
+// Code generated from /usr/local/google/home/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.13.1. DO NOT EDIT.
+
+package gen // CEL
+import "github.com/antlr4-go/antlr/v4"
+
+
+// A complete Visitor for a parse tree produced by CELParser.
+type CELVisitor interface {
+ antlr.ParseTreeVisitor
+
+ // Visit a parse tree produced by CELParser#start.
+ VisitStart(ctx *StartContext) interface{}
+
+ // Visit a parse tree produced by CELParser#expr.
+ VisitExpr(ctx *ExprContext) interface{}
+
+ // Visit a parse tree produced by CELParser#conditionalOr.
+ VisitConditionalOr(ctx *ConditionalOrContext) interface{}
+
+ // Visit a parse tree produced by CELParser#conditionalAnd.
+ VisitConditionalAnd(ctx *ConditionalAndContext) interface{}
+
+ // Visit a parse tree produced by CELParser#relation.
+ VisitRelation(ctx *RelationContext) interface{}
+
+ // Visit a parse tree produced by CELParser#calc.
+ VisitCalc(ctx *CalcContext) interface{}
+
+ // Visit a parse tree produced by CELParser#MemberExpr.
+ VisitMemberExpr(ctx *MemberExprContext) interface{}
+
+ // Visit a parse tree produced by CELParser#LogicalNot.
+ VisitLogicalNot(ctx *LogicalNotContext) interface{}
+
+ // Visit a parse tree produced by CELParser#Negate.
+ VisitNegate(ctx *NegateContext) interface{}
+
+ // Visit a parse tree produced by CELParser#MemberCall.
+ VisitMemberCall(ctx *MemberCallContext) interface{}
+
+ // Visit a parse tree produced by CELParser#Select.
+ VisitSelect(ctx *SelectContext) interface{}
+
+ // Visit a parse tree produced by CELParser#PrimaryExpr.
+ VisitPrimaryExpr(ctx *PrimaryExprContext) interface{}
+
+ // Visit a parse tree produced by CELParser#Index.
+ VisitIndex(ctx *IndexContext) interface{}
+
+ // Visit a parse tree produced by CELParser#IdentOrGlobalCall.
+ VisitIdentOrGlobalCall(ctx *IdentOrGlobalCallContext) interface{}
+
+ // Visit a parse tree produced by CELParser#Nested.
+ VisitNested(ctx *NestedContext) interface{}
+
+ // Visit a parse tree produced by CELParser#CreateList.
+ VisitCreateList(ctx *CreateListContext) interface{}
+
+ // Visit a parse tree produced by CELParser#CreateStruct.
+ VisitCreateStruct(ctx *CreateStructContext) interface{}
+
+ // Visit a parse tree produced by CELParser#CreateMessage.
+ VisitCreateMessage(ctx *CreateMessageContext) interface{}
+
+ // Visit a parse tree produced by CELParser#ConstantLiteral.
+ VisitConstantLiteral(ctx *ConstantLiteralContext) interface{}
+
+ // Visit a parse tree produced by CELParser#exprList.
+ VisitExprList(ctx *ExprListContext) interface{}
+
+ // Visit a parse tree produced by CELParser#listInit.
+ VisitListInit(ctx *ListInitContext) interface{}
+
+ // Visit a parse tree produced by CELParser#fieldInitializerList.
+ VisitFieldInitializerList(ctx *FieldInitializerListContext) interface{}
+
+ // Visit a parse tree produced by CELParser#optField.
+ VisitOptField(ctx *OptFieldContext) interface{}
+
+ // Visit a parse tree produced by CELParser#mapInitializerList.
+ VisitMapInitializerList(ctx *MapInitializerListContext) interface{}
+
+ // Visit a parse tree produced by CELParser#optExpr.
+ VisitOptExpr(ctx *OptExprContext) interface{}
+
+ // Visit a parse tree produced by CELParser#Int.
+ VisitInt(ctx *IntContext) interface{}
+
+ // Visit a parse tree produced by CELParser#Uint.
+ VisitUint(ctx *UintContext) interface{}
+
+ // Visit a parse tree produced by CELParser#Double.
+ VisitDouble(ctx *DoubleContext) interface{}
+
+ // Visit a parse tree produced by CELParser#String.
+ VisitString(ctx *StringContext) interface{}
+
+ // Visit a parse tree produced by CELParser#Bytes.
+ VisitBytes(ctx *BytesContext) interface{}
+
+ // Visit a parse tree produced by CELParser#BoolTrue.
+ VisitBoolTrue(ctx *BoolTrueContext) interface{}
+
+ // Visit a parse tree produced by CELParser#BoolFalse.
+ VisitBoolFalse(ctx *BoolFalseContext) interface{}
+
+ // Visit a parse tree produced by CELParser#Null.
+ VisitNull(ctx *NullContext) interface{}
+
+}
\ No newline at end of file
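One caveat for this interface in the Go target: the runtime's BaseParseTreeVisitor.VisitChildren is a no-op, and Go struct embedding does not virtually dispatch overridden methods, so visitor recursion has to be driven explicitly; cel-go's parser walks the tree itself rather than leaning on generated double dispatch. A hedged sketch of the explicit-walk style (hypothetical helper operating on a tree already produced by CELParser):

```go
package exprutil // hypothetical helper package, for illustration only

import (
	antlr "github.com/antlr4-go/antlr/v4"

	gen "github.com/google/cel-go/parser/gen"
)

// countIntLiterals recurses over the parse tree manually, type-switching
// on the generated context types instead of relying on CELVisitor
// dispatch, which does not recurse on its own in the Go runtime.
func countIntLiterals(tree antlr.Tree) int {
	n := 0
	if _, ok := tree.(*gen.IntContext); ok {
		n++
	}
	for i := 0; i < tree.GetChildCount(); i++ {
		n += countIntLiterals(tree.GetChild(i))
	}
	return n
}
```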
diff --git a/vendor/github.com/google/cel-go/parser/gen/doc.go b/vendor/github.com/google/cel-go/parser/gen/doc.go
new file mode 100644
index 000000000..57edd4434
--- /dev/null
+++ b/vendor/github.com/google/cel-go/parser/gen/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package gen contains all of the ANTLR-generated sources used by the cel-go parser.
+package gen
diff --git a/vendor/github.com/google/cel-go/parser/gen/generate.sh b/vendor/github.com/google/cel-go/parser/gen/generate.sh
new file mode 100644
index 000000000..27a9559f7
--- /dev/null
+++ b/vendor/github.com/google/cel-go/parser/gen/generate.sh
@@ -0,0 +1,35 @@
+#!/bin/bash -eu
+#
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# To regenerate the CEL lexer/parser statically do the following:
+# 1. Download the latest antlr tool from https://www.antlr.org/download.html
+# 2. Copy the downloaded jar to the gen directory. It will have a name
+# like antlr-<version>-complete.jar.
+# 3. Modify the script below to refer to the current ANTLR version.
+# 4. Execute the generation script from the gen directory.
+# 5. Delete the jar and commit the regenerated sources.
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+
+# Generate AntLR artifacts.
+java -Xmx500M -cp ${DIR}/antlr-4.13.1-complete.jar org.antlr.v4.Tool \
+ -Dlanguage=Go \
+ -package gen \
+ -o ${DIR} \
+ -visitor ${DIR}/CEL.g4
+
diff --git a/vendor/github.com/google/cel-go/parser/helper.go b/vendor/github.com/google/cel-go/parser/helper.go
new file mode 100644
index 000000000..9f09ead0e
--- /dev/null
+++ b/vendor/github.com/google/cel-go/parser/helper.go
@@ -0,0 +1,510 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package parser
+
+import (
+ "sync"
+
+ antlr "github.com/antlr4-go/antlr/v4"
+
+ "github.com/google/cel-go/common"
+ "github.com/google/cel-go/common/ast"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+)
+
+type parserHelper struct {
+ exprFactory ast.ExprFactory
+ source common.Source
+ sourceInfo *ast.SourceInfo
+ nextID int64
+}
+
+func newParserHelper(source common.Source, fac ast.ExprFactory) *parserHelper {
+ return &parserHelper{
+ exprFactory: fac,
+ source: source,
+ sourceInfo: ast.NewSourceInfo(source),
+ nextID: 1,
+ }
+}
+
+func (p *parserHelper) getSourceInfo() *ast.SourceInfo {
+ return p.sourceInfo
+}
+
+func (p *parserHelper) newLiteral(ctx any, value ref.Val) ast.Expr {
+ return p.exprFactory.NewLiteral(p.newID(ctx), value)
+}
+
+func (p *parserHelper) newLiteralBool(ctx any, value bool) ast.Expr {
+ return p.newLiteral(ctx, types.Bool(value))
+}
+
+func (p *parserHelper) newLiteralString(ctx any, value string) ast.Expr {
+ return p.newLiteral(ctx, types.String(value))
+}
+
+func (p *parserHelper) newLiteralBytes(ctx any, value []byte) ast.Expr {
+ return p.newLiteral(ctx, types.Bytes(value))
+}
+
+func (p *parserHelper) newLiteralInt(ctx any, value int64) ast.Expr {
+ return p.newLiteral(ctx, types.Int(value))
+}
+
+func (p *parserHelper) newLiteralUint(ctx any, value uint64) ast.Expr {
+ return p.newLiteral(ctx, types.Uint(value))
+}
+
+func (p *parserHelper) newLiteralDouble(ctx any, value float64) ast.Expr {
+ return p.newLiteral(ctx, types.Double(value))
+}
+
+func (p *parserHelper) newIdent(ctx any, name string) ast.Expr {
+ return p.exprFactory.NewIdent(p.newID(ctx), name)
+}
+
+func (p *parserHelper) newSelect(ctx any, operand ast.Expr, field string) ast.Expr {
+ return p.exprFactory.NewSelect(p.newID(ctx), operand, field)
+}
+
+func (p *parserHelper) newPresenceTest(ctx any, operand ast.Expr, field string) ast.Expr {
+ return p.exprFactory.NewPresenceTest(p.newID(ctx), operand, field)
+}
+
+func (p *parserHelper) newGlobalCall(ctx any, function string, args ...ast.Expr) ast.Expr {
+ return p.exprFactory.NewCall(p.newID(ctx), function, args...)
+}
+
+func (p *parserHelper) newReceiverCall(ctx any, function string, target ast.Expr, args ...ast.Expr) ast.Expr {
+ return p.exprFactory.NewMemberCall(p.newID(ctx), function, target, args...)
+}
+
+func (p *parserHelper) newList(ctx any, elements []ast.Expr, optionals ...int32) ast.Expr {
+ return p.exprFactory.NewList(p.newID(ctx), elements, optionals)
+}
+
+func (p *parserHelper) newMap(ctx any, entries ...ast.EntryExpr) ast.Expr {
+ return p.exprFactory.NewMap(p.newID(ctx), entries)
+}
+
+func (p *parserHelper) newMapEntry(entryID int64, key ast.Expr, value ast.Expr, optional bool) ast.EntryExpr {
+ return p.exprFactory.NewMapEntry(entryID, key, value, optional)
+}
+
+func (p *parserHelper) newObject(ctx any, typeName string, fields ...ast.EntryExpr) ast.Expr {
+ return p.exprFactory.NewStruct(p.newID(ctx), typeName, fields)
+}
+
+func (p *parserHelper) newObjectField(fieldID int64, field string, value ast.Expr, optional bool) ast.EntryExpr {
+ return p.exprFactory.NewStructField(fieldID, field, value, optional)
+}
+
+func (p *parserHelper) newComprehension(ctx any,
+ iterRange ast.Expr,
+ iterVar,
+ accuVar string,
+ accuInit ast.Expr,
+ condition ast.Expr,
+ step ast.Expr,
+ result ast.Expr) ast.Expr {
+ return p.exprFactory.NewComprehension(
+ p.newID(ctx), iterRange, iterVar, accuVar, accuInit, condition, step, result)
+}
+
+func (p *parserHelper) newComprehensionTwoVar(ctx any,
+ iterRange ast.Expr,
+ iterVar, iterVar2,
+ accuVar string,
+ accuInit ast.Expr,
+ condition ast.Expr,
+ step ast.Expr,
+ result ast.Expr) ast.Expr {
+ return p.exprFactory.NewComprehensionTwoVar(
+ p.newID(ctx), iterRange, iterVar, iterVar2, accuVar, accuInit, condition, step, result)
+}
+
+func (p *parserHelper) newID(ctx any) int64 {
+ if id, isID := ctx.(int64); isID {
+ return id
+ }
+ return p.id(ctx)
+}
+
+func (p *parserHelper) newExpr(ctx any) ast.Expr {
+ return p.exprFactory.NewUnspecifiedExpr(p.newID(ctx))
+}
+
+func (p *parserHelper) id(ctx any) int64 {
+ var offset ast.OffsetRange
+ switch c := ctx.(type) {
+ case antlr.ParserRuleContext:
+ start := c.GetStart()
+ offset.Start = p.sourceInfo.ComputeOffset(int32(start.GetLine()), int32(start.GetColumn()))
+ offset.Stop = offset.Start + int32(len(c.GetText()))
+ case antlr.Token:
+ offset.Start = p.sourceInfo.ComputeOffset(int32(c.GetLine()), int32(c.GetColumn()))
+ offset.Stop = offset.Start + int32(len(c.GetText()))
+ case common.Location:
+ offset.Start = p.sourceInfo.ComputeOffset(int32(c.Line()), int32(c.Column()))
+ offset.Stop = offset.Start
+ case ast.OffsetRange:
+ offset = c
+ default:
+ // This should only happen if the ctx is nil
+ return -1
+ }
+ id := p.nextID
+ p.sourceInfo.SetOffsetRange(id, offset)
+ p.nextID++
+ return id
+}
+
+func (p *parserHelper) deleteID(id int64) {
+ p.sourceInfo.ClearOffsetRange(id)
+ if id == p.nextID-1 {
+ p.nextID--
+ }
+}
+
+func (p *parserHelper) getLocation(id int64) common.Location {
+ return p.sourceInfo.GetStartLocation(id)
+}
+
+func (p *parserHelper) getLocationByOffset(offset int32) common.Location {
+ return p.getSourceInfo().GetLocationByOffset(offset)
+}
+
+// buildMacroCallArg iterates the expression and returns a new expression
+// where all macros have been replaced by their IDs in MacroCalls
+func (p *parserHelper) buildMacroCallArg(expr ast.Expr) ast.Expr {
+ if _, found := p.sourceInfo.GetMacroCall(expr.ID()); found {
+ return p.exprFactory.NewUnspecifiedExpr(expr.ID())
+ }
+
+ switch expr.Kind() {
+ case ast.CallKind:
+ // Iterate the AST from `expr` recursively looking for macros. Because we are at most
+ // starting from the top level macro, this recursion is bounded by the size of the AST. This
+ // means that the depth check on the AST during parsing will catch recursion overflows
+ // before we get to here.
+ call := expr.AsCall()
+ macroArgs := make([]ast.Expr, len(call.Args()))
+ for index, arg := range call.Args() {
+ macroArgs[index] = p.buildMacroCallArg(arg)
+ }
+ if !call.IsMemberFunction() {
+ return p.exprFactory.NewCall(expr.ID(), call.FunctionName(), macroArgs...)
+ }
+ macroTarget := p.buildMacroCallArg(call.Target())
+ return p.exprFactory.NewMemberCall(expr.ID(), call.FunctionName(), macroTarget, macroArgs...)
+ case ast.ListKind:
+ list := expr.AsList()
+ macroListArgs := make([]ast.Expr, list.Size())
+ for i, elem := range list.Elements() {
+ macroListArgs[i] = p.buildMacroCallArg(elem)
+ }
+ return p.exprFactory.NewList(expr.ID(), macroListArgs, list.OptionalIndices())
+ }
+ return expr
+}
+
+// addMacroCall adds the macro to the MacroCalls map in source info. If a macro has args/subargs/target
+// that are macros, their ID will be stored instead for later self-lookups.
+func (p *parserHelper) addMacroCall(exprID int64, function string, target ast.Expr, args ...ast.Expr) {
+ macroArgs := make([]ast.Expr, len(args))
+ for index, arg := range args {
+ macroArgs[index] = p.buildMacroCallArg(arg)
+ }
+ if target == nil {
+ p.sourceInfo.SetMacroCall(exprID, p.exprFactory.NewCall(0, function, macroArgs...))
+ return
+ }
+ macroTarget := target
+ if _, found := p.sourceInfo.GetMacroCall(target.ID()); found {
+ macroTarget = p.exprFactory.NewUnspecifiedExpr(target.ID())
+ } else {
+ macroTarget = p.buildMacroCallArg(target)
+ }
+ p.sourceInfo.SetMacroCall(exprID, p.exprFactory.NewMemberCall(0, function, macroTarget, macroArgs...))
+}
+
+// logicManager compacts logical trees into a more efficient structure which is semantically
+// equivalent to how the logic graph is constructed by the ANTLR parser.
+//
+// The purpose of the logicManager is to ensure a compact serialization format for the logical &&, ||
+// operators which have a tendency to create long DAGs skewed in one direction. Since the
+// operators are commutative, re-ordering the terms *must not* affect the evaluation result.
+//
+// The logic manager will either render N-chained && / || operators as a single logical
+// call with N terms, or will rebalance the tree. Rebalancing the terms is a safe, if somewhat
+// controversial, choice as it alters the traditional order-of-execution assumptions present in most
+// expressions.
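+//
+// For example, `a || b || c || d` is rendered either as the single variadic
+// call ||(a, b, c, d) or as the balanced tree ||(||(a, b), ||(c, d)), rather
+// than the left-skewed ||(||(||(a, b), c), d) produced by naive chaining.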
+type logicManager struct {
+ exprFactory ast.ExprFactory
+ function string
+ terms []ast.Expr
+ ops []int64
+ variadicASTs bool
+}
+
+// newVariadicLogicManager creates a logic manager instance bound to a specific function and its first term.
+func newVariadicLogicManager(fac ast.ExprFactory, function string, term ast.Expr) *logicManager {
+ return &logicManager{
+ exprFactory: fac,
+ function: function,
+ terms: []ast.Expr{term},
+ ops: []int64{},
+ variadicASTs: true,
+ }
+}
+
+// newBalancingLogicManager creates a logic manager instance bound to a specific function and its first term.
+func newBalancingLogicManager(fac ast.ExprFactory, function string, term ast.Expr) *logicManager {
+ return &logicManager{
+ exprFactory: fac,
+ function: function,
+ terms: []ast.Expr{term},
+ ops: []int64{},
+ variadicASTs: false,
+ }
+}
+
+// addTerm adds an operation identifier and term to the set of terms to be balanced.
+func (l *logicManager) addTerm(op int64, term ast.Expr) {
+ l.terms = append(l.terms, term)
+ l.ops = append(l.ops, op)
+}
+
+// toExpr renders the logic graph into an Expr value, either balancing a tree of logical
+// operations or creating a variadic representation of the logical operator.
+func (l *logicManager) toExpr() ast.Expr {
+ if len(l.terms) == 1 {
+ return l.terms[0]
+ }
+ if l.variadicASTs {
+ return l.exprFactory.NewCall(l.ops[0], l.function, l.terms...)
+ }
+ return l.balancedTree(0, len(l.ops)-1)
+}
+
+// balancedTree recursively balances the terms provided to a commutative operator.
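+// For example, with terms t0..t3 and operator IDs o0..o2, balancedTree(0, 2)
+// splits at mid = 1 and yields o1(o0(t0, t1), o2(t2, t3)).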
+func (l *logicManager) balancedTree(lo, hi int) ast.Expr {
+ mid := (lo + hi + 1) / 2
+
+ var left ast.Expr
+ if mid == lo {
+ left = l.terms[mid]
+ } else {
+ left = l.balancedTree(lo, mid-1)
+ }
+
+ var right ast.Expr
+ if mid == hi {
+ right = l.terms[mid+1]
+ } else {
+ right = l.balancedTree(mid+1, hi)
+ }
+ return l.exprFactory.NewCall(l.ops[mid], l.function, left, right)
+}
+
+type exprHelper struct {
+ *parserHelper
+ id int64
+}
+
+func (e *exprHelper) nextMacroID() int64 {
+ return e.parserHelper.id(e.parserHelper.getLocation(e.id))
+}
+
+// Copy implements the ExprHelper interface method by producing a copy of the input Expr value
+// with a fresh set of numeric identifiers for the Expr and all its descendants.
+func (e *exprHelper) Copy(expr ast.Expr) ast.Expr {
+ offsetRange, _ := e.parserHelper.sourceInfo.GetOffsetRange(expr.ID())
+ copyID := e.parserHelper.newID(offsetRange)
+ switch expr.Kind() {
+ case ast.LiteralKind:
+ return e.exprFactory.NewLiteral(copyID, expr.AsLiteral())
+ case ast.IdentKind:
+ return e.exprFactory.NewIdent(copyID, expr.AsIdent())
+ case ast.SelectKind:
+ sel := expr.AsSelect()
+ op := e.Copy(sel.Operand())
+ if sel.IsTestOnly() {
+ return e.exprFactory.NewPresenceTest(copyID, op, sel.FieldName())
+ }
+ return e.exprFactory.NewSelect(copyID, op, sel.FieldName())
+ case ast.CallKind:
+ call := expr.AsCall()
+ args := call.Args()
+ argsCopy := make([]ast.Expr, len(args))
+ for i, arg := range args {
+ argsCopy[i] = e.Copy(arg)
+ }
+ if !call.IsMemberFunction() {
+ return e.exprFactory.NewCall(copyID, call.FunctionName(), argsCopy...)
+ }
+ return e.exprFactory.NewMemberCall(copyID, call.FunctionName(), e.Copy(call.Target()), argsCopy...)
+ case ast.ListKind:
+ list := expr.AsList()
+ elems := list.Elements()
+ elemsCopy := make([]ast.Expr, len(elems))
+ for i, elem := range elems {
+ elemsCopy[i] = e.Copy(elem)
+ }
+ return e.exprFactory.NewList(copyID, elemsCopy, list.OptionalIndices())
+ case ast.MapKind:
+ m := expr.AsMap()
+ entries := m.Entries()
+ entriesCopy := make([]ast.EntryExpr, len(entries))
+ for i, en := range entries {
+ entry := en.AsMapEntry()
+ entryID := e.nextMacroID()
+ entriesCopy[i] = e.exprFactory.NewMapEntry(entryID,
+ e.Copy(entry.Key()), e.Copy(entry.Value()), entry.IsOptional())
+ }
+ return e.exprFactory.NewMap(copyID, entriesCopy)
+ case ast.StructKind:
+ s := expr.AsStruct()
+ fields := s.Fields()
+ fieldsCopy := make([]ast.EntryExpr, len(fields))
+ for i, f := range fields {
+ field := f.AsStructField()
+ fieldID := e.nextMacroID()
+ fieldsCopy[i] = e.exprFactory.NewStructField(fieldID,
+ field.Name(), e.Copy(field.Value()), field.IsOptional())
+ }
+ return e.exprFactory.NewStruct(copyID, s.TypeName(), fieldsCopy)
+ case ast.ComprehensionKind:
+ compre := expr.AsComprehension()
+ iterRange := e.Copy(compre.IterRange())
+ accuInit := e.Copy(compre.AccuInit())
+ cond := e.Copy(compre.LoopCondition())
+ step := e.Copy(compre.LoopStep())
+ result := e.Copy(compre.Result())
+ // All comprehensions can be represented by the two-variable comprehension since the
+ // differentiation between one and two-variable is whether the iterVar2 value is non-empty.
+ return e.exprFactory.NewComprehensionTwoVar(copyID,
+ iterRange, compre.IterVar(), compre.IterVar2(), compre.AccuVar(), accuInit, cond, step, result)
+ }
+ return e.exprFactory.NewUnspecifiedExpr(copyID)
+}
+
+// NewLiteral implements the ExprHelper interface method.
+func (e *exprHelper) NewLiteral(value ref.Val) ast.Expr {
+ return e.exprFactory.NewLiteral(e.nextMacroID(), value)
+}
+
+// NewList implements the ExprHelper interface method.
+func (e *exprHelper) NewList(elems ...ast.Expr) ast.Expr {
+ return e.exprFactory.NewList(e.nextMacroID(), elems, []int32{})
+}
+
+// NewMap implements the ExprHelper interface method.
+func (e *exprHelper) NewMap(entries ...ast.EntryExpr) ast.Expr {
+ return e.exprFactory.NewMap(e.nextMacroID(), entries)
+}
+
+// NewMapEntry implements the ExprHelper interface method.
+func (e *exprHelper) NewMapEntry(key ast.Expr, val ast.Expr, optional bool) ast.EntryExpr {
+ return e.exprFactory.NewMapEntry(e.nextMacroID(), key, val, optional)
+}
+
+// NewStruct implements the ExprHelper interface method.
+func (e *exprHelper) NewStruct(typeName string, fieldInits ...ast.EntryExpr) ast.Expr {
+ return e.exprFactory.NewStruct(e.nextMacroID(), typeName, fieldInits)
+}
+
+// NewStructField implements the ExprHelper interface method.
+func (e *exprHelper) NewStructField(field string, init ast.Expr, optional bool) ast.EntryExpr {
+ return e.exprFactory.NewStructField(e.nextMacroID(), field, init, optional)
+}
+
+// NewComprehension implements the ExprHelper interface method.
+func (e *exprHelper) NewComprehension(
+ iterRange ast.Expr,
+ iterVar string,
+ accuVar string,
+ accuInit ast.Expr,
+ condition ast.Expr,
+ step ast.Expr,
+ result ast.Expr) ast.Expr {
+ return e.exprFactory.NewComprehension(
+ e.nextMacroID(), iterRange, iterVar, accuVar, accuInit, condition, step, result)
+}
+
+// NewComprehensionTwoVar implements the ExprHelper interface method.
+func (e *exprHelper) NewComprehensionTwoVar(
+ iterRange ast.Expr,
+ iterVar,
+ iterVar2,
+ accuVar string,
+ accuInit,
+ condition,
+ step,
+ result ast.Expr) ast.Expr {
+ return e.exprFactory.NewComprehensionTwoVar(
+ e.nextMacroID(), iterRange, iterVar, iterVar2, accuVar, accuInit, condition, step, result)
+}
+
+// NewIdent implements the ExprHelper interface method.
+func (e *exprHelper) NewIdent(name string) ast.Expr {
+ return e.exprFactory.NewIdent(e.nextMacroID(), name)
+}
+
+// NewAccuIdent implements the ExprHelper interface method.
+func (e *exprHelper) NewAccuIdent() ast.Expr {
+ return e.exprFactory.NewAccuIdent(e.nextMacroID())
+}
+
+// NewCall implements the ExprHelper interface method.
+func (e *exprHelper) NewCall(function string, args ...ast.Expr) ast.Expr {
+ return e.exprFactory.NewCall(e.nextMacroID(), function, args...)
+}
+
+// NewMemberCall implements the ExprHelper interface method.
+func (e *exprHelper) NewMemberCall(function string, target ast.Expr, args ...ast.Expr) ast.Expr {
+ return e.exprFactory.NewMemberCall(e.nextMacroID(), function, target, args...)
+}
+
+// NewPresenceTest implements the ExprHelper interface method.
+func (e *exprHelper) NewPresenceTest(operand ast.Expr, field string) ast.Expr {
+ return e.exprFactory.NewPresenceTest(e.nextMacroID(), operand, field)
+}
+
+// NewSelect implements the ExprHelper interface method.
+func (e *exprHelper) NewSelect(operand ast.Expr, field string) ast.Expr {
+ return e.exprFactory.NewSelect(e.nextMacroID(), operand, field)
+}
+
+// OffsetLocation implements the ExprHelper interface method.
+func (e *exprHelper) OffsetLocation(exprID int64) common.Location {
+ return e.parserHelper.sourceInfo.GetStartLocation(exprID)
+}
+
+// NewError associates an error message with a given expression id, populating the source offset location of the error if possible.
+func (e *exprHelper) NewError(exprID int64, message string) *common.Error {
+ return common.NewError(exprID, message, e.OffsetLocation(exprID))
+}
+
+var (
+ // Thread-safe pool of ExprHelper values to minimize alloc overhead of ExprHelper creations.
+ exprHelperPool = &sync.Pool{
+ New: func() any {
+ return &exprHelper{}
+ },
+ }
+)
diff --git a/vendor/github.com/google/cel-go/parser/input.go b/vendor/github.com/google/cel-go/parser/input.go
new file mode 100644
index 000000000..44792455d
--- /dev/null
+++ b/vendor/github.com/google/cel-go/parser/input.go
@@ -0,0 +1,129 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package parser
+
+import (
+ antlr "github.com/antlr4-go/antlr/v4"
+
+ "github.com/google/cel-go/common/runes"
+)
+
+type charStream struct {
+ buf runes.Buffer
+ pos int
+ src string
+}
+
+// Consume implements (antlr.CharStream).Consume.
+func (c *charStream) Consume() {
+ if c.pos >= c.buf.Len() {
+ panic("cannot consume EOF")
+ }
+ c.pos++
+}
+
+// LA implements (antlr.CharStream).LA.
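+//
+// LA(1) is the rune at the current position, LA(2) the rune after it, and
+// LA(-1) the rune most recently consumed; positions outside the buffer yield
+// antlr.TokenEOF.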
+func (c *charStream) LA(offset int) int {
+ if offset == 0 {
+ return 0
+ }
+ if offset < 0 {
+ offset++
+ }
+ pos := c.pos + offset - 1
+ if pos < 0 || pos >= c.buf.Len() {
+ return antlr.TokenEOF
+ }
+ return int(c.buf.Get(pos))
+}
+
+// LT mimics (*antlr.InputStream).LT.
+func (c *charStream) LT(offset int) int {
+ return c.LA(offset)
+}
+
+// Mark implements (antlr.CharStream).Mark.
+func (c *charStream) Mark() int {
+ return -1
+}
+
+// Release implements (antlr.CharStream).Release.
+func (c *charStream) Release(marker int) {}
+
+// Index implements (antlr.CharStream).Index.
+func (c *charStream) Index() int {
+ return c.pos
+}
+
+// Seek implements (antlr.CharStream).Seek.
+func (c *charStream) Seek(index int) {
+ if index <= c.pos {
+ c.pos = index
+ return
+ }
+ if index < c.buf.Len() {
+ c.pos = index
+ } else {
+ c.pos = c.buf.Len()
+ }
+}
+
+// Size implements (antlr.CharStream).Size.
+func (c *charStream) Size() int {
+ return c.buf.Len()
+}
+
+// GetSourceName implements (antlr.CharStream).GetSourceName.
+func (c *charStream) GetSourceName() string {
+ return c.src
+}
+
+// GetText implements (antlr.CharStream).GetText.
+func (c *charStream) GetText(start, stop int) string {
+ if stop >= c.buf.Len() {
+ stop = c.buf.Len() - 1
+ }
+ if start >= c.buf.Len() {
+ return ""
+ }
+ return c.buf.Slice(start, stop+1)
+}
+
+// GetTextFromTokens implements (antlr.CharStream).GetTextFromTokens.
+func (c *charStream) GetTextFromTokens(start, stop antlr.Token) string {
+ if start != nil && stop != nil {
+ return c.GetText(start.GetTokenIndex(), stop.GetTokenIndex())
+ }
+ return ""
+}
+
+// GetTextFromInterval implements (antlr.CharStream).GetTextFromInterval.
+func (c *charStream) GetTextFromInterval(i antlr.Interval) string {
+ return c.GetText(i.Start, i.Stop)
+}
+
+// String mimics (*antlr.InputStream).String.
+func (c *charStream) String() string {
+ return c.buf.Slice(0, c.buf.Len())
+}
+
+var _ antlr.CharStream = &charStream{}
+
+func newCharStream(buf runes.Buffer, desc string) antlr.CharStream {
+ return &charStream{
+ buf: buf,
+ src: desc,
+ }
+}
diff --git a/vendor/github.com/google/cel-go/parser/macro.go b/vendor/github.com/google/cel-go/parser/macro.go
new file mode 100644
index 000000000..dc47b4203
--- /dev/null
+++ b/vendor/github.com/google/cel-go/parser/macro.go
@@ -0,0 +1,428 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package parser
+
+import (
+ "fmt"
+
+ "github.com/google/cel-go/common"
+ "github.com/google/cel-go/common/ast"
+ "github.com/google/cel-go/common/operators"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+)
+
+// NewGlobalMacro creates a Macro for a global function with the specified arg count.
+func NewGlobalMacro(function string, argCount int, expander MacroExpander) Macro {
+ return &macro{
+ function: function,
+ argCount: argCount,
+ expander: expander}
+}
+
+// NewReceiverMacro creates a Macro for a receiver function matching the specified arg count.
+func NewReceiverMacro(function string, argCount int, expander MacroExpander) Macro {
+ return &macro{
+ function: function,
+ argCount: argCount,
+ expander: expander,
+ receiverStyle: true}
+}
+
+// NewGlobalVarArgMacro creates a Macro for a global function with a variable arg count.
+func NewGlobalVarArgMacro(function string, expander MacroExpander) Macro {
+ return &macro{
+ function: function,
+ expander: expander,
+ varArgStyle: true}
+}
+
+// NewReceiverVarArgMacro creates a Macro for a receiver function matching a variable arg count.
+func NewReceiverVarArgMacro(function string, expander MacroExpander) Macro {
+ return &macro{
+ function: function,
+ expander: expander,
+ receiverStyle: true,
+ varArgStyle: true}
+}
+
+// Macro interface for describing the function signature to match and the MacroExpander to apply.
+//
+// Note: when a Macro should apply to multiple overloads (based on arg count) of a given function,
+// a Macro should be created per arg-count.
+type Macro interface {
+ // Function name to match.
+ Function() string
+
+ // ArgCount for the function call.
+ //
+ // When the macro is a var-arg style macro, the return value will be zero, but the MacroKey
+ // will contain a `*` where the arg count would have been.
+ ArgCount() int
+
+ // IsReceiverStyle returns true if the macro matches a receiver style call.
+ IsReceiverStyle() bool
+
+ // MacroKey returns the macro signatures accepted by this macro.
+ //
+ // Format: `<function>:<arg-count>:<is-receiver>`.
+ //
+ // When the macro is a var-arg style macro, the `arg-count` value is represented as a `*`.
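+ //
+ // For example, the global has() macro produces the key `has:1:false`, and the
+ // two-argument receiver-style map() macro produces `map:2:true`.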
+ MacroKey() string
+
+ // Expander returns the MacroExpander to apply when the macro key matches the parsed call
+ // signature.
+ Expander() MacroExpander
+}
+
+// Macro type which declares the function name and arg count expected for the
+// macro, as well as a macro expansion function.
+type macro struct {
+ function string
+ receiverStyle bool
+ varArgStyle bool
+ argCount int
+ expander MacroExpander
+}
+
+// Function returns the macro's function name (i.e. the function whose syntax it mimics).
+func (m *macro) Function() string {
+ return m.function
+}
+
+// ArgCount returns the number of arguments the macro expects.
+func (m *macro) ArgCount() int {
+ return m.argCount
+}
+
+// IsReceiverStyle returns whether the macro is receiver style.
+func (m *macro) IsReceiverStyle() bool {
+ return m.receiverStyle
+}
+
+// Expander implements the Macro interface method.
+func (m *macro) Expander() MacroExpander {
+ return m.expander
+}
+
+// MacroKey implements the Macro interface method.
+func (m *macro) MacroKey() string {
+ if m.varArgStyle {
+ return makeVarArgMacroKey(m.function, m.receiverStyle)
+ }
+ return makeMacroKey(m.function, m.argCount, m.receiverStyle)
+}
+
+func makeMacroKey(name string, args int, receiverStyle bool) string {
+ return fmt.Sprintf("%s:%d:%v", name, args, receiverStyle)
+}
+
+func makeVarArgMacroKey(name string, receiverStyle bool) string {
+ return fmt.Sprintf("%s:*:%v", name, receiverStyle)
+}
+
+// MacroExpander converts a call and its associated arguments into a new CEL abstract syntax tree.
+//
+// If the MacroExpander determines within the implementation that an expansion is not needed it may return
+// a nil Expr value to indicate a non-match. However, if an expansion is to be performed, but the arguments
+// are not well-formed, the result of the expansion will be an error.
+//
+// The MacroExpander accepts as arguments an ExprHelper as well as the arguments used in the function call
+// and produces as output an Expr ast node.
+//
+// Note: when the Macro.IsReceiverStyle() method returns true, the target argument will be non-nil.
+type MacroExpander func(eh ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error)
+
+// ExprHelper assists with the creation of Expr values in a manner which is consistent with
+// the internal semantics and id generation behaviors of the parser and checker libraries.
+type ExprHelper interface {
+ // Copy the input expression with a brand new set of identifiers.
+ Copy(ast.Expr) ast.Expr
+
+ // NewLiteral creates an Expr value for a scalar literal value.
+ NewLiteral(value ref.Val) ast.Expr
+
+ // NewList creates a list literal instruction with an optional set of elements.
+ NewList(elems ...ast.Expr) ast.Expr
+
+ // NewMap creates a CreateStruct instruction for a map where the map is comprised of the
+ // optional set of key, value entries.
+ NewMap(entries ...ast.EntryExpr) ast.Expr
+
+ // NewMapEntry creates a Map Entry for the key, value pair.
+ NewMapEntry(key ast.Expr, val ast.Expr, optional bool) ast.EntryExpr
+
+ // NewStruct creates a struct literal expression with an optional set of field initializers.
+ NewStruct(typeName string, fieldInits ...ast.EntryExpr) ast.Expr
+
+ // NewStructField creates a new struct field initializer from the field name and value.
+ NewStructField(field string, init ast.Expr, optional bool) ast.EntryExpr
+
+ // NewComprehension creates a new one-variable comprehension instruction.
+ //
+ // - iterRange represents the expression that resolves to a list or map where the elements or
+ // keys (respectively) will be iterated over.
+ // - iterVar is the variable name for the list element value, or the map key, depending on the
+ // range type.
+ // - accuVar is the accumulation variable name, typically parser.AccumulatorName.
+ // - accuInit is the initial expression whose value will be set for the accuVar prior to
+ // folding.
+ // - condition is the expression to test to determine whether to continue folding.
+ // - step is the expression to evaluate at the conclusion of a single fold iteration.
+ // - result is the computation to evaluate at the conclusion of the fold.
+ //
+ // The accuVar should not shadow variable names that you would like to reference within the
+ // environment in the step and condition expressions. Presently, the name __result__ is commonly
+ // used by built-in macros but this may change in the future.
+ NewComprehension(iterRange ast.Expr,
+ iterVar,
+ accuVar string,
+ accuInit,
+ condition,
+ step,
+ result ast.Expr) ast.Expr
+
+ // NewComprehensionTwoVar creates a new two-variable comprehension instruction.
+ //
+ // - iterRange represents the expression that resolves to a list or map where the elements or
+ // keys (respectively) will be iterated over.
+ // - iterVar is the iteration variable assigned to the list index or the map key.
+ // - iterVar2 is the iteration variable assigned to the list element value or the map key value.
+ // - accuVar is the accumulation variable name, typically parser.AccumulatorName.
+ // - accuInit is the initial expression whose value will be set for the accuVar prior to
+ // folding.
+ // - condition is the expression to test to determine whether to continue folding.
+ // - step is the expression to evaluate at the conclusion of a single fold iteration.
+ // - result is the computation to evaluate at the conclusion of the fold.
+ //
+ // The accuVar should not shadow variable names that you would like to reference within the
+ // environment in the step and condition expressions. Presently, the name __result__ is commonly
+ // used by built-in macros but this may change in the future.
+ NewComprehensionTwoVar(iterRange ast.Expr,
+ iterVar,
+ iterVar2,
+ accuVar string,
+ accuInit,
+ condition,
+ step,
+ result ast.Expr) ast.Expr
+
+ // NewIdent creates an identifier Expr value.
+ NewIdent(name string) ast.Expr
+
+ // NewAccuIdent returns an accumulator identifier for use with comprehension results.
+ NewAccuIdent() ast.Expr
+
+ // NewCall creates a function call Expr value for a global (free) function.
+ NewCall(function string, args ...ast.Expr) ast.Expr
+
+ // NewMemberCall creates a function call Expr value for a receiver-style function.
+ NewMemberCall(function string, target ast.Expr, args ...ast.Expr) ast.Expr
+
+ // NewPresenceTest creates a Select TestOnly Expr value for modelling has() semantics.
+ NewPresenceTest(operand ast.Expr, field string) ast.Expr
+
+ // NewSelect create a field traversal Expr value.
+ NewSelect(operand ast.Expr, field string) ast.Expr
+
+ // OffsetLocation returns the Location of the expression identifier.
+ OffsetLocation(exprID int64) common.Location
+
+ // NewError associates an error message with a given expression id.
+ NewError(exprID int64, message string) *common.Error
+}
+
+var (
+ // HasMacro expands "has(m.f)" which tests the presence of a field, avoiding the need to
+ // specify the field as a string.
+ HasMacro = NewGlobalMacro(operators.Has, 1, MakeHas)
+
+ // AllMacro expands "range.all(var, predicate)" into a comprehension which ensures that all
+ // elements in the range satisfy the predicate.
+ AllMacro = NewReceiverMacro(operators.All, 2, MakeAll)
+
+ // ExistsMacro expands "range.exists(var, predicate)" into a comprehension which ensures that
+ // some element in the range satisfies the predicate.
+ ExistsMacro = NewReceiverMacro(operators.Exists, 2, MakeExists)
+
+ // ExistsOneMacro expands "range.exists_one(var, predicate)", which is true if for exactly one
+ // element in range the predicate holds.
+ ExistsOneMacro = NewReceiverMacro(operators.ExistsOne, 2, MakeExistsOne)
+
+ // MapMacro expands "range.map(var, function)" into a comprehension which applies the function
+ // to each element in the range to produce a new list.
+ MapMacro = NewReceiverMacro(operators.Map, 2, MakeMap)
+
+ // MapFilterMacro expands "range.map(var, predicate, function)" into a comprehension which
+ // first filters the elements in the range by the predicate, then applies the transform function
+ // to produce a new list.
+ MapFilterMacro = NewReceiverMacro(operators.Map, 3, MakeMap)
+
+ // FilterMacro expands "range.filter(var, predicate)" into a comprehension which filters
+ // elements in the range, producing a new list from the elements that satisfy the predicate.
+ FilterMacro = NewReceiverMacro(operators.Filter, 2, MakeFilter)
+
+ // AllMacros includes the list of all spec-supported macros.
+ AllMacros = []Macro{
+ HasMacro,
+ AllMacro,
+ ExistsMacro,
+ ExistsOneMacro,
+ MapMacro,
+ MapFilterMacro,
+ FilterMacro,
+ }
+
+ // NoMacros list.
+ NoMacros = []Macro{}
+)
+
+// AccumulatorName is the traditional variable name assigned to the fold accumulator variable.
+const AccumulatorName = "__result__"
+
+type quantifierKind int
+
+const (
+ quantifierAll quantifierKind = iota
+ quantifierExists
+ quantifierExistsOne
+)
+
+// MakeAll expands the input call arguments into a comprehension that returns true if all of the
+// elements in the range match the predicate expression:
+// <iterRange>.all(<iterVar>, <predicate>)
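+//
+// For example, `[1, 2, 3].all(x, x > 0)` becomes a fold whose accumulator
+// __result__ starts at true, whose loop step is `__result__ && x > 0`, and
+// whose loop condition `@not_strictly_false(__result__)` allows an early exit
+// once the accumulator becomes false.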
+func MakeAll(eh ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) {
+ return makeQuantifier(quantifierAll, eh, target, args)
+}
+
+// MakeExists expands the input call arguments into a comprehension that returns true if any of the
+// elements in the range match the predicate expression:
+// <iterRange>.exists(<iterVar>, <predicate>)
+func MakeExists(eh ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) {
+ return makeQuantifier(quantifierExists, eh, target, args)
+}
+
+// MakeExistsOne expands the input call arguments into a comprehension that returns true if exactly
+// one of the elements in the range matches the predicate expression:
+// <iterRange>.exists_one(<iterVar>, <predicate>)
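+//
+// For example, `[1, 2, 3].exists_one(x, x > 2)` is true because exactly one
+// element, 3, satisfies the predicate.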
+func MakeExistsOne(eh ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) {
+ return makeQuantifier(quantifierExistsOne, eh, target, args)
+}
+
+// MakeMap expands the input call arguments into a comprehension that transforms each element in the
+// input to produce an output list.
+//
+// There are two call patterns supported by map:
+//
+// <iterRange>.map(<iterVar>, <transform>)
+// <iterRange>.map(<iterVar>, <predicate>, <transform>)
+//
+// In the second form, only iterVar values which return true when provided to the predicate expression
+// are transformed.
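+//
+// For example, `[1, 2].map(x, x * 2)` yields `[2, 4]`, while the three-argument
+// form `[1, 2, 3].map(x, x > 1, x * 2)` first filters to `[2, 3]` and then
+// transforms, yielding `[4, 6]`.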
+func MakeMap(eh ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) {
+ v, found := extractIdent(args[0])
+ if !found {
+ return nil, eh.NewError(args[0].ID(), "argument is not an identifier")
+ }
+
+ var fn ast.Expr
+ var filter ast.Expr
+
+ if len(args) == 3 {
+ filter = args[1]
+ fn = args[2]
+ } else {
+ filter = nil
+ fn = args[1]
+ }
+
+ init := eh.NewList()
+ condition := eh.NewLiteral(types.True)
+ step := eh.NewCall(operators.Add, eh.NewAccuIdent(), eh.NewList(fn))
+
+ if filter != nil {
+ step = eh.NewCall(operators.Conditional, filter, step, eh.NewAccuIdent())
+ }
+ return eh.NewComprehension(target, v, AccumulatorName, init, condition, step, eh.NewAccuIdent()), nil
+}
+
+// MakeFilter expands the input call arguments into a comprehension which produces a list containing
+// only the elements which match the provided predicate expression:
+// <iterRange>.filter(<iterVar>, <predicate>)
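+//
+// For example, `[1, 2, 3].filter(x, x > 1)` yields `[2, 3]`.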
+func MakeFilter(eh ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) {
+ v, found := extractIdent(args[0])
+ if !found {
+ return nil, eh.NewError(args[0].ID(), "argument is not an identifier")
+ }
+
+ filter := args[1]
+ init := eh.NewList()
+ condition := eh.NewLiteral(types.True)
+ step := eh.NewCall(operators.Add, eh.NewAccuIdent(), eh.NewList(args[0]))
+ step = eh.NewCall(operators.Conditional, filter, step, eh.NewAccuIdent())
+ return eh.NewComprehension(target, v, AccumulatorName, init, condition, step, eh.NewAccuIdent()), nil
+}
+
+// MakeHas expands the input call arguments into a presence test, e.g. has(<operand>.field)
+func MakeHas(eh ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) {
+ if args[0].Kind() == ast.SelectKind {
+ s := args[0].AsSelect()
+ return eh.NewPresenceTest(s.Operand(), s.FieldName()), nil
+ }
+ return nil, eh.NewError(args[0].ID(), "invalid argument to has() macro")
+}
+
+func makeQuantifier(kind quantifierKind, eh ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) {
+ v, found := extractIdent(args[0])
+ if !found {
+ return nil, eh.NewError(args[0].ID(), "argument must be a simple name")
+ }
+
+ var init ast.Expr
+ var condition ast.Expr
+ var step ast.Expr
+ var result ast.Expr
+ switch kind {
+ case quantifierAll:
+ init = eh.NewLiteral(types.True)
+ condition = eh.NewCall(operators.NotStrictlyFalse, eh.NewAccuIdent())
+ step = eh.NewCall(operators.LogicalAnd, eh.NewAccuIdent(), args[1])
+ result = eh.NewAccuIdent()
+ case quantifierExists:
+ init = eh.NewLiteral(types.False)
+ condition = eh.NewCall(
+ operators.NotStrictlyFalse,
+ eh.NewCall(operators.LogicalNot, eh.NewAccuIdent()))
+ step = eh.NewCall(operators.LogicalOr, eh.NewAccuIdent(), args[1])
+ result = eh.NewAccuIdent()
+ case quantifierExistsOne:
+ init = eh.NewLiteral(types.Int(0))
+ condition = eh.NewLiteral(types.True)
+ step = eh.NewCall(operators.Conditional, args[1],
+ eh.NewCall(operators.Add, eh.NewAccuIdent(), eh.NewLiteral(types.Int(1))), eh.NewAccuIdent())
+ result = eh.NewCall(operators.Equals, eh.NewAccuIdent(), eh.NewLiteral(types.Int(1)))
+ default:
+ return nil, eh.NewError(args[0].ID(), fmt.Sprintf("unrecognized quantifier '%v'", kind))
+ }
+ return eh.NewComprehension(target, v, AccumulatorName, init, condition, step, result), nil
+}
+
+func extractIdent(e ast.Expr) (string, bool) {
+ switch e.Kind() {
+ case ast.IdentKind:
+ return e.AsIdent(), true
+ }
+ return "", false
+}
diff --git a/vendor/github.com/google/cel-go/parser/options.go b/vendor/github.com/google/cel-go/parser/options.go
new file mode 100644
index 000000000..61fc3adec
--- /dev/null
+++ b/vendor/github.com/google/cel-go/parser/options.go
@@ -0,0 +1,140 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package parser
+
+import "fmt"
+
+type options struct {
+ maxRecursionDepth int
+ errorReportingLimit int
+ errorRecoveryTokenLookaheadLimit int
+ errorRecoveryLimit int
+ expressionSizeCodePointLimit int
+ macros map[string]Macro
+ populateMacroCalls bool
+ enableOptionalSyntax bool
+ enableVariadicOperatorASTs bool
+}
+
+// Option configures the behavior of the parser.
+type Option func(*options) error
+
+// MaxRecursionDepth limits the maximum depth the parser will attempt to parse the expression before giving up.
+func MaxRecursionDepth(limit int) Option {
+ return func(opts *options) error {
+ if limit < -1 {
+ return fmt.Errorf("max recursion depth must be greater than or equal to -1: %d", limit)
+ }
+ opts.maxRecursionDepth = limit
+ return nil
+ }
+}
+
+// ErrorRecoveryLookaheadTokenLimit limits the number of lexer tokens that may be considered during error recovery.
+//
+// Error recovery often involves looking ahead in the input to determine if there's a point at which parsing may
+// successfully resume. In some pathological cases, the parser can look through quite a large set of input which
+// in turn generates a lot of back-tracking and performance degradation.
+//
+// The limit must be >= 1, and is recommended to be less than the default of 256.
+func ErrorRecoveryLookaheadTokenLimit(limit int) Option {
+ return func(opts *options) error {
+ if limit < 1 {
+ return fmt.Errorf("error recovery lookahead token limit must be at least 1: %d", limit)
+ }
+ opts.errorRecoveryTokenLookaheadLimit = limit
+ return nil
+ }
+}
+
+// ErrorRecoveryLimit limits the number of attempts the parser will perform to recover from an error.
+func ErrorRecoveryLimit(limit int) Option {
+ return func(opts *options) error {
+ if limit < -1 {
+ return fmt.Errorf("error recovery limit must be greater than or equal to -1: %d", limit)
+ }
+ opts.errorRecoveryLimit = limit
+ return nil
+ }
+}
+
+// ErrorReportingLimit limits the number of syntax error reports before terminating parsing.
+//
+// The limit must be at least 1. If unset, the limit will be 100.
+func ErrorReportingLimit(limit int) Option {
+ return func(opts *options) error {
+ if limit < 1 {
+ return fmt.Errorf("error reporting limit must be at least 1: %d", limit)
+ }
+ opts.errorReportingLimit = limit
+ return nil
+ }
+}
+
+// ExpressionSizeCodePointLimit is an option which limits the maximum code point count of an
+// expression.
+func ExpressionSizeCodePointLimit(expressionSizeCodePointLimit int) Option {
+ return func(opts *options) error {
+ if expressionSizeCodePointLimit < -1 {
+ return fmt.Errorf("expression size code point limit must be greater than or equal to -1: %d", expressionSizeCodePointLimit)
+ }
+ opts.expressionSizeCodePointLimit = expressionSizeCodePointLimit
+ return nil
+ }
+}
+
+// Macros adds the given macros to the parser.
+func Macros(macros ...Macro) Option {
+ return func(opts *options) error {
+ for _, m := range macros {
+ if m != nil {
+ if opts.macros == nil {
+ opts.macros = make(map[string]Macro)
+ }
+ opts.macros[m.MacroKey()] = m
+ }
+ }
+ return nil
+ }
+}
+
+// PopulateMacroCalls ensures that the original call signatures replaced by expanded macros
+// are preserved in the `SourceInfo` of the parse result.
+func PopulateMacroCalls(populateMacroCalls bool) Option {
+ return func(opts *options) error {
+ opts.populateMacroCalls = populateMacroCalls
+ return nil
+ }
+}
+
+// EnableOptionalSyntax enables syntax for optional field and index selection.
+func EnableOptionalSyntax(optionalSyntax bool) Option {
+ return func(opts *options) error {
+ opts.enableOptionalSyntax = optionalSyntax
+ return nil
+ }
+}
+
+// EnableVariadicOperatorASTs enables a compact representation of chained like-kind commutative
+// operators, e.g. `a || b || c || d` -> `call(op='||', args=[a, b, c, d])`.
+//
+// The benefit of enabling variadic operator ASTs is a more compact representation of deeply
+// nested logic graphs.
+func EnableVariadicOperatorASTs(varArgASTs bool) Option {
+ return func(opts *options) error {
+ opts.enableVariadicOperatorASTs = varArgASTs
+ return nil
+ }
+}
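+
+// For illustration, a parser with the standard macros, optional syntax, and a
+// tighter recursion bound could be constructed as:
+//
+// p, err := NewParser(Macros(AllMacros...), EnableOptionalSyntax(true), MaxRecursionDepth(32))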
diff --git a/vendor/github.com/google/cel-go/parser/parser.go b/vendor/github.com/google/cel-go/parser/parser.go
new file mode 100644
index 000000000..5cbb17672
--- /dev/null
+++ b/vendor/github.com/google/cel-go/parser/parser.go
@@ -0,0 +1,1012 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package parser declares an expression parser with support for macro
+// expansion.
+package parser
+
+import (
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+
+ antlr "github.com/antlr4-go/antlr/v4"
+
+ "github.com/google/cel-go/common"
+ "github.com/google/cel-go/common/ast"
+ "github.com/google/cel-go/common/operators"
+ "github.com/google/cel-go/common/runes"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/parser/gen"
+)
+
+// Parser encapsulates the context necessary to perform parsing for different expressions.
+type Parser struct {
+ options
+}
+
+// NewParser builds and returns a new Parser using the provided options.
+func NewParser(opts ...Option) (*Parser, error) {
+ p := &Parser{}
+ for _, opt := range opts {
+ if err := opt(&p.options); err != nil {
+ return nil, err
+ }
+ }
+ if p.errorReportingLimit == 0 {
+ p.errorReportingLimit = 100
+ }
+ if p.maxRecursionDepth == 0 {
+ p.maxRecursionDepth = 250
+ }
+ if p.maxRecursionDepth == -1 {
+ p.maxRecursionDepth = int((^uint(0)) >> 1)
+ }
+ if p.errorRecoveryTokenLookaheadLimit == 0 {
+ p.errorRecoveryTokenLookaheadLimit = 256
+ }
+ if p.errorRecoveryLimit == 0 {
+ p.errorRecoveryLimit = 30
+ }
+ if p.errorRecoveryLimit == -1 {
+ p.errorRecoveryLimit = int((^uint(0)) >> 1)
+ }
+ if p.expressionSizeCodePointLimit == 0 {
+ p.expressionSizeCodePointLimit = 100_000
+ }
+ if p.expressionSizeCodePointLimit == -1 {
+ p.expressionSizeCodePointLimit = int((^uint(0)) >> 1)
+ }
+ // Bool is false by default, so populateMacroCalls will be false by default
+ return p, nil
+}
+
+// mustNewParser does the work of NewParser and panics if an error occurs.
+//
+// This function is only intended for internal use and is for backwards compatibility in Parse and
+// ParseWithMacros, where we know the options will not result in an error.
+func mustNewParser(opts ...Option) *Parser {
+ p, err := NewParser(opts...)
+ if err != nil {
+ panic(err)
+ }
+ return p
+}
+
+// Parse parses the expression represented by source and returns the result.
+func (p *Parser) Parse(source common.Source) (*ast.AST, *common.Errors) {
+ errs := common.NewErrors(source)
+ fac := ast.NewExprFactory()
+ impl := parser{
+ errors: &parseErrors{errs},
+ exprFactory: fac,
+ helper: newParserHelper(source, fac),
+ macros: p.macros,
+ maxRecursionDepth: p.maxRecursionDepth,
+ errorReportingLimit: p.errorReportingLimit,
+ errorRecoveryLimit: p.errorRecoveryLimit,
+ errorRecoveryLookaheadTokenLimit: p.errorRecoveryTokenLookaheadLimit,
+ populateMacroCalls: p.populateMacroCalls,
+ enableOptionalSyntax: p.enableOptionalSyntax,
+ enableVariadicOperatorASTs: p.enableVariadicOperatorASTs,
+ }
+ buf, ok := source.(runes.Buffer)
+ if !ok {
+ buf = runes.NewBuffer(source.Content())
+ }
+ var out ast.Expr
+ if buf.Len() > p.expressionSizeCodePointLimit {
+ out = impl.reportError(common.NoLocation,
+ "expression code point size exceeds limit: size: %d, limit %d",
+ buf.Len(), p.expressionSizeCodePointLimit)
+ } else {
+ out = impl.parse(buf, source.Description())
+ }
+ return ast.NewAST(out, impl.helper.getSourceInfo()), errs
+}
+
+// reservedIds are not legal to use as variables. We exclude them post-parse, as they *are* valid
+// field names for protos, and it would complicate the grammar to distinguish the cases.
+var reservedIds = map[string]struct{}{
+ "as": {},
+ "break": {},
+ "const": {},
+ "continue": {},
+ "else": {},
+ "false": {},
+ "for": {},
+ "function": {},
+ "if": {},
+ "import": {},
+ "in": {},
+ "let": {},
+ "loop": {},
+ "package": {},
+ "namespace": {},
+ "null": {},
+ "return": {},
+ "true": {},
+ "var": {},
+ "void": {},
+ "while": {},
+}
+
+// Parse converts a source input to a parsed expression.
+// This function calls ParseWithMacros with AllMacros.
+//
+// Deprecated: Use NewParser().Parse() instead.
+func Parse(source common.Source) (*ast.AST, *common.Errors) {
+ return mustNewParser(Macros(AllMacros...)).Parse(source)
+}
+
+type recursionError struct {
+ message string
+}
+
+// Error implements error.
+func (re *recursionError) Error() string {
+ return re.message
+}
+
+var _ error = &recursionError{}
+
+type recursionListener struct {
+ maxDepth int
+ ruleTypeDepth map[int]*int
+}
+
+func (rl *recursionListener) VisitTerminal(node antlr.TerminalNode) {}
+
+func (rl *recursionListener) VisitErrorNode(node antlr.ErrorNode) {}
+
+func (rl *recursionListener) EnterEveryRule(ctx antlr.ParserRuleContext) {
+ if ctx == nil {
+ return
+ }
+ ruleIndex := ctx.GetRuleIndex()
+ depth, found := rl.ruleTypeDepth[ruleIndex]
+ if !found {
+ var counter = 1
+ rl.ruleTypeDepth[ruleIndex] = &counter
+ depth = &counter
+ } else {
+ *depth++
+ }
+ if *depth > rl.maxDepth {
+ panic(&recursionError{
+ message: fmt.Sprintf("expression recursion limit exceeded: %d", rl.maxDepth),
+ })
+ }
+}
+
+func (rl *recursionListener) ExitEveryRule(ctx antlr.ParserRuleContext) {
+ if ctx == nil {
+ return
+ }
+ ruleIndex := ctx.GetRuleIndex()
+ if depth, found := rl.ruleTypeDepth[ruleIndex]; found && *depth > 0 {
+ *depth--
+ }
+}
+
+var _ antlr.ParseTreeListener = &recursionListener{}
+
+type tooManyErrors struct {
+ errorReportingLimit int
+}
+
+func (t *tooManyErrors) Error() string {
+ return fmt.Sprintf("More than %d syntax errors", t.errorReportingLimit)
+}
+
+var _ error = &tooManyErrors{}
+
+type recoveryLimitError struct {
+ message string
+}
+
+// Error implements error.
+func (rl *recoveryLimitError) Error() string {
+ return rl.message
+}
+
+type lookaheadLimitError struct {
+ message string
+}
+
+func (ll *lookaheadLimitError) Error() string {
+ return ll.message
+}
+
+var _ error = &recoveryLimitError{}
+
+type recoveryLimitErrorStrategy struct {
+ *antlr.DefaultErrorStrategy
+ errorRecoveryLimit int
+ errorRecoveryTokenLookaheadLimit int
+ recoveryAttempts int
+}
+
+type lookaheadConsumer struct {
+ antlr.Parser
+ errorRecoveryTokenLookaheadLimit int
+ lookaheadAttempts int
+}
+
+func (lc *lookaheadConsumer) Consume() antlr.Token {
+ if lc.lookaheadAttempts >= lc.errorRecoveryTokenLookaheadLimit {
+ panic(&lookaheadLimitError{
+ message: fmt.Sprintf("error recovery token lookahead limit exceeded: %d", lc.errorRecoveryTokenLookaheadLimit),
+ })
+ }
+ lc.lookaheadAttempts++
+ return lc.Parser.Consume()
+}
+
+func (rl *recoveryLimitErrorStrategy) Recover(recognizer antlr.Parser, e antlr.RecognitionException) {
+ rl.checkAttempts(recognizer)
+ lc := &lookaheadConsumer{Parser: recognizer, errorRecoveryTokenLookaheadLimit: rl.errorRecoveryTokenLookaheadLimit}
+ rl.DefaultErrorStrategy.Recover(lc, e)
+}
+
+func (rl *recoveryLimitErrorStrategy) RecoverInline(recognizer antlr.Parser) antlr.Token {
+ rl.checkAttempts(recognizer)
+ lc := &lookaheadConsumer{Parser: recognizer, errorRecoveryTokenLookaheadLimit: rl.errorRecoveryTokenLookaheadLimit}
+ return rl.DefaultErrorStrategy.RecoverInline(lc)
+}
+
+func (rl *recoveryLimitErrorStrategy) checkAttempts(recognizer antlr.Parser) {
+ if rl.recoveryAttempts == rl.errorRecoveryLimit {
+ rl.recoveryAttempts++
+ msg := fmt.Sprintf("error recovery attempt limit exceeded: %d", rl.errorRecoveryLimit)
+ recognizer.NotifyErrorListeners(msg, nil, nil)
+ panic(&recoveryLimitError{
+ message: msg,
+ })
+ }
+ rl.recoveryAttempts++
+}
+
+var _ antlr.ErrorStrategy = &recoveryLimitErrorStrategy{}
+
+type parser struct {
+ gen.BaseCELVisitor
+ errors *parseErrors
+ exprFactory ast.ExprFactory
+ helper *parserHelper
+ macros map[string]Macro
+ recursionDepth int
+ errorReports int
+ maxRecursionDepth int
+ errorReportingLimit int
+ errorRecoveryLimit int
+ errorRecoveryLookaheadTokenLimit int
+ populateMacroCalls bool
+ enableOptionalSyntax bool
+ enableVariadicOperatorASTs bool
+}
+
+var _ gen.CELVisitor = (*parser)(nil)
+
+func (p *parser) parse(expr runes.Buffer, desc string) ast.Expr {
+ lexer := gen.NewCELLexer(newCharStream(expr, desc))
+ lexer.RemoveErrorListeners()
+ lexer.AddErrorListener(p)
+
+ prsr := gen.NewCELParser(antlr.NewCommonTokenStream(lexer, 0))
+ prsr.RemoveErrorListeners()
+
+ prsrListener := &recursionListener{
+ maxDepth: p.maxRecursionDepth,
+ ruleTypeDepth: map[int]*int{},
+ }
+
+ prsr.AddErrorListener(p)
+ prsr.AddParseListener(prsrListener)
+
+ prsr.SetErrorHandler(&recoveryLimitErrorStrategy{
+ DefaultErrorStrategy: antlr.NewDefaultErrorStrategy(),
+ errorRecoveryLimit: p.errorRecoveryLimit,
+ errorRecoveryTokenLookaheadLimit: p.errorRecoveryLookaheadTokenLimit,
+ })
+
+ defer func() {
+ if val := recover(); val != nil {
+ switch err := val.(type) {
+ case *lookaheadLimitError:
+ p.errors.internalError(err.Error())
+ case *recursionError:
+ p.errors.internalError(err.Error())
+ case *tooManyErrors:
+ // do nothing
+ case *recoveryLimitError:
+ // do nothing, listeners already notified and error reported.
+ default:
+ panic(val)
+ }
+ }
+ }()
+
+ return p.Visit(prsr.Start_()).(ast.Expr)
+}
+
+// Visitor implementations.
+func (p *parser) Visit(tree antlr.ParseTree) any {
+ t := unnest(tree)
+ switch tree := t.(type) {
+ case *gen.StartContext:
+ return p.VisitStart(tree)
+ case *gen.ExprContext:
+ p.checkAndIncrementRecursionDepth()
+ out := p.VisitExpr(tree)
+ p.decrementRecursionDepth()
+ return out
+ case *gen.ConditionalAndContext:
+ return p.VisitConditionalAnd(tree)
+ case *gen.ConditionalOrContext:
+ return p.VisitConditionalOr(tree)
+ case *gen.RelationContext:
+ p.checkAndIncrementRecursionDepth()
+ out := p.VisitRelation(tree)
+ p.decrementRecursionDepth()
+ return out
+ case *gen.CalcContext:
+ p.checkAndIncrementRecursionDepth()
+ out := p.VisitCalc(tree)
+ p.decrementRecursionDepth()
+ return out
+ case *gen.LogicalNotContext:
+ return p.VisitLogicalNot(tree)
+ case *gen.IdentOrGlobalCallContext:
+ return p.VisitIdentOrGlobalCall(tree)
+ case *gen.SelectContext:
+ p.checkAndIncrementRecursionDepth()
+ out := p.VisitSelect(tree)
+ p.decrementRecursionDepth()
+ return out
+ case *gen.MemberCallContext:
+ p.checkAndIncrementRecursionDepth()
+ out := p.VisitMemberCall(tree)
+ p.decrementRecursionDepth()
+ return out
+ case *gen.MapInitializerListContext:
+ return p.VisitMapInitializerList(tree)
+ case *gen.NegateContext:
+ return p.VisitNegate(tree)
+ case *gen.IndexContext:
+ p.checkAndIncrementRecursionDepth()
+ out := p.VisitIndex(tree)
+ p.decrementRecursionDepth()
+ return out
+ case *gen.UnaryContext:
+ return p.VisitUnary(tree)
+ case *gen.CreateListContext:
+ return p.VisitCreateList(tree)
+ case *gen.CreateMessageContext:
+ return p.VisitCreateMessage(tree)
+ case *gen.CreateStructContext:
+ return p.VisitCreateStruct(tree)
+ case *gen.IntContext:
+ return p.VisitInt(tree)
+ case *gen.UintContext:
+ return p.VisitUint(tree)
+ case *gen.DoubleContext:
+ return p.VisitDouble(tree)
+ case *gen.StringContext:
+ return p.VisitString(tree)
+ case *gen.BytesContext:
+ return p.VisitBytes(tree)
+ case *gen.BoolFalseContext:
+ return p.VisitBoolFalse(tree)
+ case *gen.BoolTrueContext:
+ return p.VisitBoolTrue(tree)
+ case *gen.NullContext:
+ return p.VisitNull(tree)
+ }
+
+ // Report at least one error if the parser reaches an unknown parse element.
+ // Typically, this happens if the parser has already encountered a syntax error elsewhere.
+ if p.errors.errorCount() == 0 {
+ txt := "<<nil>>"
+ if t != nil {
+ txt = fmt.Sprintf("<<%T>>", t)
+ }
+ return p.reportError(common.NoLocation, "unknown parse element encountered: %s", txt)
+ }
+ return p.helper.newExpr(common.NoLocation)
+}
+
+// Visit a parse tree produced by CELParser#start.
+func (p *parser) VisitStart(ctx *gen.StartContext) any {
+ return p.Visit(ctx.Expr())
+}
+
+// Visit a parse tree produced by CELParser#expr.
+func (p *parser) VisitExpr(ctx *gen.ExprContext) any {
+ result := p.Visit(ctx.GetE()).(ast.Expr)
+ if ctx.GetOp() == nil {
+ return result
+ }
+ opID := p.helper.id(ctx.GetOp())
+ ifTrue := p.Visit(ctx.GetE1()).(ast.Expr)
+ ifFalse := p.Visit(ctx.GetE2()).(ast.Expr)
+ return p.globalCallOrMacro(opID, operators.Conditional, result, ifTrue, ifFalse)
+}
+
+// Visit a parse tree produced by CELParser#conditionalOr.
+func (p *parser) VisitConditionalOr(ctx *gen.ConditionalOrContext) any {
+ result := p.Visit(ctx.GetE()).(ast.Expr)
+ l := p.newLogicManager(operators.LogicalOr, result)
+ rest := ctx.GetE1()
+ for i, op := range ctx.GetOps() {
+ if i >= len(rest) {
+ return p.reportError(ctx, "unexpected character, wanted '||'")
+ }
+ next := p.Visit(rest[i]).(ast.Expr)
+ opID := p.helper.id(op)
+ l.addTerm(opID, next)
+ }
+ return l.toExpr()
+}
+
+// Visit a parse tree produced by CELParser#conditionalAnd.
+func (p *parser) VisitConditionalAnd(ctx *gen.ConditionalAndContext) any {
+ result := p.Visit(ctx.GetE()).(ast.Expr)
+ l := p.newLogicManager(operators.LogicalAnd, result)
+ rest := ctx.GetE1()
+ for i, op := range ctx.GetOps() {
+ if i >= len(rest) {
+ return p.reportError(ctx, "unexpected character, wanted '&&'")
+ }
+ next := p.Visit(rest[i]).(ast.Expr)
+ opID := p.helper.id(op)
+ l.addTerm(opID, next)
+ }
+ return l.toExpr()
+}
+
+// Visit a parse tree produced by CELParser#relation.
+func (p *parser) VisitRelation(ctx *gen.RelationContext) any {
+ opText := ""
+ if ctx.GetOp() != nil {
+ opText = ctx.GetOp().GetText()
+ }
+ if op, found := operators.Find(opText); found {
+ lhs := p.Visit(ctx.Relation(0)).(ast.Expr)
+ opID := p.helper.id(ctx.GetOp())
+ rhs := p.Visit(ctx.Relation(1)).(ast.Expr)
+ return p.globalCallOrMacro(opID, op, lhs, rhs)
+ }
+ return p.reportError(ctx, "operator not found")
+}
+
+// Visit a parse tree produced by CELParser#calc.
+func (p *parser) VisitCalc(ctx *gen.CalcContext) any {
+ opText := ""
+ if ctx.GetOp() != nil {
+ opText = ctx.GetOp().GetText()
+ }
+ if op, found := operators.Find(opText); found {
+ lhs := p.Visit(ctx.Calc(0)).(ast.Expr)
+ opID := p.helper.id(ctx.GetOp())
+ rhs := p.Visit(ctx.Calc(1)).(ast.Expr)
+ return p.globalCallOrMacro(opID, op, lhs, rhs)
+ }
+ return p.reportError(ctx, "operator not found")
+}
+
+func (p *parser) VisitUnary(ctx *gen.UnaryContext) any {
+ return p.helper.newLiteralString(ctx, "<<error>>")
+}
+
+// Visit a parse tree produced by CELParser#LogicalNot.
+func (p *parser) VisitLogicalNot(ctx *gen.LogicalNotContext) any {
+ if len(ctx.GetOps())%2 == 0 {
+ return p.Visit(ctx.Member())
+ }
+ opID := p.helper.id(ctx.GetOps()[0])
+ target := p.Visit(ctx.Member()).(ast.Expr)
+ return p.globalCallOrMacro(opID, operators.LogicalNot, target)
+}
+
+func (p *parser) VisitNegate(ctx *gen.NegateContext) any {
+ if len(ctx.GetOps())%2 == 0 {
+ return p.Visit(ctx.Member())
+ }
+ opID := p.helper.id(ctx.GetOps()[0])
+ target := p.Visit(ctx.Member()).(ast.Expr)
+ return p.globalCallOrMacro(opID, operators.Negate, target)
+}
+
+// VisitSelect visits a parse tree produced by CELParser#Select.
+func (p *parser) VisitSelect(ctx *gen.SelectContext) any {
+ operand := p.Visit(ctx.Member()).(ast.Expr)
+ // Handle the error case where no valid identifier is specified.
+ if ctx.GetId() == nil || ctx.GetOp() == nil {
+ return p.helper.newExpr(ctx)
+ }
+ id := ctx.GetId().GetText()
+ if ctx.GetOpt() != nil {
+ if !p.enableOptionalSyntax {
+ return p.reportError(ctx.GetOp(), "unsupported syntax '.?'")
+ }
+ return p.helper.newGlobalCall(
+ ctx.GetOp(),
+ operators.OptSelect,
+ operand,
+ p.helper.newLiteralString(ctx.GetId(), id))
+ }
+ return p.helper.newSelect(ctx.GetOp(), operand, id)
+}
+
+// VisitMemberCall visits a parse tree produced by CELParser#MemberCall.
+func (p *parser) VisitMemberCall(ctx *gen.MemberCallContext) any {
+ operand := p.Visit(ctx.Member()).(ast.Expr)
+ // Handle the error case where no valid identifier is specified.
+ if ctx.GetId() == nil {
+ return p.helper.newExpr(ctx)
+ }
+ id := ctx.GetId().GetText()
+ opID := p.helper.id(ctx.GetOpen())
+ return p.receiverCallOrMacro(opID, id, operand, p.visitExprList(ctx.GetArgs())...)
+}
+
+// Visit a parse tree produced by CELParser#Index.
+func (p *parser) VisitIndex(ctx *gen.IndexContext) any {
+ target := p.Visit(ctx.Member()).(ast.Expr)
+ // Handle the error case where no valid identifier is specified.
+ if ctx.GetOp() == nil {
+ return p.helper.newExpr(ctx)
+ }
+ opID := p.helper.id(ctx.GetOp())
+ index := p.Visit(ctx.GetIndex()).(ast.Expr)
+ operator := operators.Index
+ if ctx.GetOpt() != nil {
+ if !p.enableOptionalSyntax {
+ return p.reportError(ctx.GetOp(), "unsupported syntax '[?'")
+ }
+ operator = operators.OptIndex
+ }
+ return p.globalCallOrMacro(opID, operator, target, index)
+}
+
+// Visit a parse tree produced by CELParser#CreateMessage.
+func (p *parser) VisitCreateMessage(ctx *gen.CreateMessageContext) any {
+ messageName := ""
+ for _, id := range ctx.GetIds() {
+ if len(messageName) != 0 {
+ messageName += "."
+ }
+ messageName += id.GetText()
+ }
+ if ctx.GetLeadingDot() != nil {
+ messageName = "." + messageName
+ }
+ objID := p.helper.id(ctx.GetOp())
+ entries := p.VisitIFieldInitializerList(ctx.GetEntries()).([]ast.EntryExpr)
+ return p.helper.newObject(objID, messageName, entries...)
+}
+
+// Visit a parse tree of field initializers.
+func (p *parser) VisitIFieldInitializerList(ctx gen.IFieldInitializerListContext) any {
+ if ctx == nil || ctx.GetFields() == nil {
+ // This is the result of a syntax error handled elsewhere, return empty.
+ return []ast.EntryExpr{}
+ }
+
+ result := make([]ast.EntryExpr, len(ctx.GetFields()))
+ cols := ctx.GetCols()
+ vals := ctx.GetValues()
+ for i, f := range ctx.GetFields() {
+ if i >= len(cols) || i >= len(vals) {
+ // This is the result of a syntax error detected elsewhere.
+ return []ast.EntryExpr{}
+ }
+ initID := p.helper.id(cols[i])
+ optField := f.(*gen.OptFieldContext)
+ optional := optField.GetOpt() != nil
+ if !p.enableOptionalSyntax && optional {
+ p.reportError(optField, "unsupported syntax '?'")
+ continue
+ }
+ // The field may be empty due to a prior error.
+ id := optField.IDENTIFIER()
+ if id == nil {
+ return []ast.EntryExpr{}
+ }
+ fieldName := id.GetText()
+ value := p.Visit(vals[i]).(ast.Expr)
+ field := p.helper.newObjectField(initID, fieldName, value, optional)
+ result[i] = field
+ }
+ return result
+}
+
+// Visit a parse tree produced by CELParser#IdentOrGlobalCall.
+func (p *parser) VisitIdentOrGlobalCall(ctx *gen.IdentOrGlobalCallContext) any {
+ identName := ""
+ if ctx.GetLeadingDot() != nil {
+ identName = "."
+ }
+ // Handle the error case where no valid identifier is specified.
+ if ctx.GetId() == nil {
+ return p.helper.newExpr(ctx)
+ }
+ // Handle reserved identifiers.
+ id := ctx.GetId().GetText()
+ if _, ok := reservedIds[id]; ok {
+ return p.reportError(ctx, "reserved identifier: %s", id)
+ }
+ identName += id
+ if ctx.GetOp() != nil {
+ opID := p.helper.id(ctx.GetOp())
+ return p.globalCallOrMacro(opID, identName, p.visitExprList(ctx.GetArgs())...)
+ }
+ return p.helper.newIdent(ctx.GetId(), identName)
+}
+
+// Visit a parse tree produced by CELParser#CreateList.
+func (p *parser) VisitCreateList(ctx *gen.CreateListContext) any {
+ listID := p.helper.id(ctx.GetOp())
+ elems, optionals := p.visitListInit(ctx.GetElems())
+ return p.helper.newList(listID, elems, optionals...)
+}
+
+// Visit a parse tree produced by CELParser#CreateStruct.
+func (p *parser) VisitCreateStruct(ctx *gen.CreateStructContext) any {
+ structID := p.helper.id(ctx.GetOp())
+ entries := []ast.EntryExpr{}
+ if ctx.GetEntries() != nil {
+ entries = p.Visit(ctx.GetEntries()).([]ast.EntryExpr)
+ }
+ return p.helper.newMap(structID, entries...)
+}
+
+// Visit a parse tree produced by CELParser#mapInitializerList.
+func (p *parser) VisitMapInitializerList(ctx *gen.MapInitializerListContext) any {
+ if ctx == nil || ctx.GetKeys() == nil {
+ // This is the result of a syntax error handled elsewhere, return empty.
+ return []ast.EntryExpr{}
+ }
+
+ result := make([]ast.EntryExpr, len(ctx.GetCols()))
+ keys := ctx.GetKeys()
+ vals := ctx.GetValues()
+ for i, col := range ctx.GetCols() {
+ colID := p.helper.id(col)
+ if i >= len(keys) || i >= len(vals) {
+ // This is the result of a syntax error detected elsewhere.
+ return []ast.EntryExpr{}
+ }
+ optKey := keys[i]
+ optional := optKey.GetOpt() != nil
+ if !p.enableOptionalSyntax && optional {
+ p.reportError(optKey, "unsupported syntax '?'")
+ continue
+ }
+ key := p.Visit(optKey.GetE()).(ast.Expr)
+ value := p.Visit(vals[i]).(ast.Expr)
+ entry := p.helper.newMapEntry(colID, key, value, optional)
+ result[i] = entry
+ }
+ return result
+}
+
+// Visit a parse tree produced by CELParser#Int.
+func (p *parser) VisitInt(ctx *gen.IntContext) any {
+ text := ctx.GetTok().GetText()
+ base := 10
+ if strings.HasPrefix(text, "0x") {
+ base = 16
+ text = text[2:]
+ }
+ if ctx.GetSign() != nil {
+ text = ctx.GetSign().GetText() + text
+ }
+ i, err := strconv.ParseInt(text, base, 64)
+ if err != nil {
+ return p.reportError(ctx, "invalid int literal")
+ }
+ return p.helper.newLiteralInt(ctx, i)
+}
+
+// Visit a parse tree produced by CELParser#Uint.
+func (p *parser) VisitUint(ctx *gen.UintContext) any {
+ text := ctx.GetTok().GetText()
+ // trim the 'u' designator included in the uint literal.
+ text = text[:len(text)-1]
+ base := 10
+ if strings.HasPrefix(text, "0x") {
+ base = 16
+ text = text[2:]
+ }
+ i, err := strconv.ParseUint(text, base, 64)
+ if err != nil {
+ return p.reportError(ctx, "invalid uint literal")
+ }
+ return p.helper.newLiteralUint(ctx, i)
+}
+
+// Visit a parse tree produced by CELParser#Double.
+func (p *parser) VisitDouble(ctx *gen.DoubleContext) any {
+ txt := ctx.GetTok().GetText()
+ if ctx.GetSign() != nil {
+ txt = ctx.GetSign().GetText() + txt
+ }
+ f, err := strconv.ParseFloat(txt, 64)
+ if err != nil {
+ return p.reportError(ctx, "invalid double literal")
+ }
+ return p.helper.newLiteralDouble(ctx, f)
+}
+
+// Visit a parse tree produced by CELParser#String.
+func (p *parser) VisitString(ctx *gen.StringContext) any {
+ s := p.unquote(ctx, ctx.GetText(), false)
+ return p.helper.newLiteralString(ctx, s)
+}
+
+// Visit a parse tree produced by CELParser#Bytes.
+func (p *parser) VisitBytes(ctx *gen.BytesContext) any {
+ b := []byte(p.unquote(ctx, ctx.GetTok().GetText()[1:], true))
+ return p.helper.newLiteralBytes(ctx, b)
+}
+
+// Visit a parse tree produced by CELParser#BoolTrue.
+func (p *parser) VisitBoolTrue(ctx *gen.BoolTrueContext) any {
+ return p.helper.newLiteralBool(ctx, true)
+}
+
+// Visit a parse tree produced by CELParser#BoolFalse.
+func (p *parser) VisitBoolFalse(ctx *gen.BoolFalseContext) any {
+ return p.helper.newLiteralBool(ctx, false)
+}
+
+// Visit a parse tree produced by CELParser#Null.
+func (p *parser) VisitNull(ctx *gen.NullContext) any {
+ return p.helper.exprFactory.NewLiteral(p.helper.newID(ctx), types.NullValue)
+}
+
+func (p *parser) visitExprList(ctx gen.IExprListContext) []ast.Expr {
+ if ctx == nil {
+ return []ast.Expr{}
+ }
+ return p.visitSlice(ctx.GetE())
+}
+
+func (p *parser) visitListInit(ctx gen.IListInitContext) ([]ast.Expr, []int32) {
+ if ctx == nil {
+ return []ast.Expr{}, []int32{}
+ }
+ elements := ctx.GetElems()
+ result := make([]ast.Expr, len(elements))
+ optionals := []int32{}
+ for i, e := range elements {
+ ex := p.Visit(e.GetE()).(ast.Expr)
+ if ex == nil {
+ return []ast.Expr{}, []int32{}
+ }
+ result[i] = ex
+ if e.GetOpt() != nil {
+ if !p.enableOptionalSyntax {
+ p.reportError(e.GetOpt(), "unsupported syntax '?'")
+ continue
+ }
+ optionals = append(optionals, int32(i))
+ }
+ }
+ return result, optionals
+}
+
+func (p *parser) visitSlice(expressions []gen.IExprContext) []ast.Expr {
+ if expressions == nil {
+ return []ast.Expr{}
+ }
+ result := make([]ast.Expr, len(expressions))
+ for i, e := range expressions {
+ ex := p.Visit(e).(ast.Expr)
+ result[i] = ex
+ }
+ return result
+}
+
+func (p *parser) unquote(ctx any, value string, isBytes bool) string {
+ text, err := unescape(value, isBytes)
+ if err != nil {
+ p.reportError(ctx, "%s", err.Error())
+ return value
+ }
+ return text
+}
+
+func (p *parser) newLogicManager(function string, term ast.Expr) *logicManager {
+ if p.enableVariadicOperatorASTs {
+ return newVariadicLogicManager(p.exprFactory, function, term)
+ }
+ return newBalancingLogicManager(p.exprFactory, function, term)
+}
+
+func (p *parser) reportError(ctx any, format string, args ...any) ast.Expr {
+ var location common.Location
+ err := p.helper.newExpr(ctx)
+ switch c := ctx.(type) {
+ case common.Location:
+ location = c
+ case antlr.Token, antlr.ParserRuleContext:
+ location = p.helper.getLocation(err.ID())
+ }
+ // Report the error at the expression's ID with the provided format arguments.
+ p.errors.reportErrorAtID(err.ID(), location, format, args...)
+ return err
+}
+
+// ANTLR Parse listener implementations
+func (p *parser) SyntaxError(recognizer antlr.Recognizer, offendingSymbol any, line, column int, msg string, e antlr.RecognitionException) {
+ offset := p.helper.sourceInfo.ComputeOffset(int32(line), int32(column))
+ l := p.helper.getLocationByOffset(offset)
+ // Hack to keep existing error messages consistent with previous versions of CEL when a reserved word
+ // is used as an identifier. This behavior needs to be overhauled to provide consistent, normalized error
+ // messages out of ANTLR to prevent future breaking changes related to error message content.
+ if strings.Contains(msg, "no viable alternative") {
+ msg = reservedIdentifier.ReplaceAllString(msg, mismatchedReservedIdentifier)
+ }
+ // Ensure that no more than errorReportingLimit syntax errors are reported, as exceeding the
+ // limit halts attempts to recover from a seriously broken expression.
+ if p.errorReports < p.errorReportingLimit {
+ p.errorReports++
+ p.errors.syntaxError(l, msg)
+ } else {
+ tme := &tooManyErrors{errorReportingLimit: p.errorReportingLimit}
+ p.errors.syntaxError(l, tme.Error())
+ panic(tme)
+ }
+}
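+
+// Once the reporting limit above is reached, SyntaxError records one final
+// "too many errors" message and panics; the recover in parse treats
+// tooManyErrors as already handled, so parsing simply stops.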
+
+func (p *parser) ReportAmbiguity(recognizer antlr.Parser, dfa *antlr.DFA, startIndex, stopIndex int, exact bool, ambigAlts *antlr.BitSet, configs *antlr.ATNConfigSet) {
+ // Intentional
+}
+
+func (p *parser) ReportAttemptingFullContext(recognizer antlr.Parser, dfa *antlr.DFA, startIndex, stopIndex int, conflictingAlts *antlr.BitSet, configs *antlr.ATNConfigSet) {
+ // Intentional
+}
+
+func (p *parser) ReportContextSensitivity(recognizer antlr.Parser, dfa *antlr.DFA, startIndex, stopIndex, prediction int, configs *antlr.ATNConfigSet) {
+ // Intentional
+}
+
+func (p *parser) globalCallOrMacro(exprID int64, function string, args ...ast.Expr) ast.Expr {
+ if expr, found := p.expandMacro(exprID, function, nil, args...); found {
+ return expr
+ }
+ return p.helper.newGlobalCall(exprID, function, args...)
+}
+
+func (p *parser) receiverCallOrMacro(exprID int64, function string, target ast.Expr, args ...ast.Expr) ast.Expr {
+ if expr, found := p.expandMacro(exprID, function, target, args...); found {
+ return expr
+ }
+ return p.helper.newReceiverCall(exprID, function, target, args...)
+}
+
+func (p *parser) expandMacro(exprID int64, function string, target ast.Expr, args ...ast.Expr) (ast.Expr, bool) {
+ macro, found := p.macros[makeMacroKey(function, len(args), target != nil)]
+ if !found {
+ macro, found = p.macros[makeVarArgMacroKey(function, target != nil)]
+ if !found {
+ return nil, false
+ }
+ }
+ eh := exprHelperPool.Get().(*exprHelper)
+ defer exprHelperPool.Put(eh)
+ eh.parserHelper = p.helper
+ eh.id = exprID
+ expr, err := macro.Expander()(eh, target, args)
+ // An error indicates that the macro was matched, but the arguments were not well-formed.
+ if err != nil {
+ loc := err.Location
+ if loc == nil {
+ loc = p.helper.getLocation(exprID)
+ }
+ p.helper.deleteID(exprID)
+ return p.reportError(loc, err.Message), true
+ }
+ // A nil value from the macro indicates that the macro implementation decided that
+ // an expansion should not be performed.
+ if expr == nil {
+ return nil, false
+ }
+ if p.populateMacroCalls {
+ p.helper.addMacroCall(expr.ID(), function, target, args...)
+ }
+ p.helper.deleteID(exprID)
+ return expr, true
+}
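+
+// For example, a receiver-style call such as list.exists(i, i > 0) is first
+// looked up by its exact macro key (function name, argument count, receiver
+// flag) and only then by its var-arg key, so fixed-arity macros take priority
+// over var-arg macros with the same name.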
+
+func (p *parser) checkAndIncrementRecursionDepth() {
+ p.recursionDepth++
+ if p.recursionDepth > p.maxRecursionDepth {
+ panic(&recursionError{message: "max recursion depth exceeded"})
+ }
+}
+
+func (p *parser) decrementRecursionDepth() {
+ p.recursionDepth--
+}
+
+// unnest traverses down the left-hand side of the parse graph until it encounters the first compound
+// parse node or the first leaf in the parse graph.
+func unnest(tree antlr.ParseTree) antlr.ParseTree {
+ for tree != nil {
+ switch t := tree.(type) {
+ case *gen.ExprContext:
+ // conditionalOr op='?' conditionalOr : expr
+ if t.GetOp() != nil {
+ return t
+ }
+ // conditionalOr
+ tree = t.GetE()
+ case *gen.ConditionalOrContext:
+ // conditionalAnd (ops=|| conditionalAnd)*
+ if len(t.GetOps()) > 0 {
+ return t
+ }
+ // conditionalAnd
+ tree = t.GetE()
+ case *gen.ConditionalAndContext:
+ // relation (ops=&& relation)*
+ if len(t.GetOps()) > 0 {
+ return t
+ }
+ // relation
+ tree = t.GetE()
+ case *gen.RelationContext:
+ // relation op relation
+ if t.GetOp() != nil {
+ return t
+ }
+ // calc
+ tree = t.Calc()
+ case *gen.CalcContext:
+ // calc op calc
+ if t.GetOp() != nil {
+ return t
+ }
+ // unary
+ tree = t.Unary()
+ case *gen.MemberExprContext:
+ // member expands to one of: primary, select, index, or create message
+ tree = t.Member()
+ case *gen.PrimaryExprContext:
+ // primary expands to one of identifier, nested, create list, create struct, literal
+ tree = t.Primary()
+ case *gen.NestedContext:
+ // contains a nested 'expr'
+ tree = t.GetE()
+ case *gen.ConstantLiteralContext:
+ // expands to a primitive literal
+ tree = t.Literal()
+ default:
+ return t
+ }
+ }
+ return tree
+}
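+
+// For example, a bare literal is wrapped by the grammar in expr ->
+// conditionalOr -> conditionalAnd -> relation -> calc -> unary -> member ->
+// primary contexts; unnest collapses that chain so Visit dispatches directly
+// on the literal's context rather than revisiting every wrapper rule.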
+
+var (
+ reservedIdentifier = regexp.MustCompile("no viable alternative at input '.(true|false|null)'")
+ mismatchedReservedIdentifier = "mismatched input '$1' expecting IDENTIFIER"
+)
diff --git a/vendor/github.com/google/cel-go/parser/unescape.go b/vendor/github.com/google/cel-go/parser/unescape.go
new file mode 100644
index 000000000..27c57a9f3
--- /dev/null
+++ b/vendor/github.com/google/cel-go/parser/unescape.go
@@ -0,0 +1,237 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package parser
+
+import (
+ "fmt"
+ "strings"
+ "unicode/utf8"
+)
+
+// Unescape takes a quoted string, unquotes, and unescapes it.
+//
+// This function performs unescaping compatible with GoogleSQL.
+func unescape(value string, isBytes bool) (string, error) {
+ // All strings normalize newlines to the \n representation.
+ value = newlineNormalizer.Replace(value)
+ n := len(value)
+
+ // Nothing to unescape / decode.
+ if n < 2 {
+ return value, fmt.Errorf("unable to unescape string")
+ }
+
+ // Raw string preceded by the 'r|R' prefix.
+ isRawLiteral := false
+ if value[0] == 'r' || value[0] == 'R' {
+ value = value[1:]
+ n = len(value)
+ isRawLiteral = true
+ }
+
+ // Quoted string of some form, must have same first and last char.
+ if value[0] != value[n-1] || (value[0] != '"' && value[0] != '\'') {
+ return value, fmt.Errorf("unable to unescape string")
+ }
+
+ // Normalize the multi-line CEL string representation to a standard
+ // Go quoted string.
+ if n >= 6 {
+ if strings.HasPrefix(value, "'''") {
+ if !strings.HasSuffix(value, "'''") {
+ return value, fmt.Errorf("unable to unescape string")
+ }
+ value = "\"" + value[3:n-3] + "\""
+ } else if strings.HasPrefix(value, `"""`) {
+ if !strings.HasSuffix(value, `"""`) {
+ return value, fmt.Errorf("unable to unescape string")
+ }
+ value = "\"" + value[3:n-3] + "\""
+ }
+ n = len(value)
+ }
+ value = value[1 : n-1]
+ // If there is nothing to escape, then return.
+ if isRawLiteral || !strings.ContainsRune(value, '\\') {
+ return value, nil
+ }
+
+ // Otherwise the string contains escape characters.
+ // The following logic is adapted from `strconv/quote.go`
+ var runeTmp [utf8.UTFMax]byte
+ buf := make([]byte, 0, 3*n/2)
+ for len(value) > 0 {
+ c, encode, rest, err := unescapeChar(value, isBytes)
+ if err != nil {
+ return "", err
+ }
+ value = rest
+ if c < utf8.RuneSelf || !encode {
+ buf = append(buf, byte(c))
+ } else {
+ n := utf8.EncodeRune(runeTmp[:], c)
+ buf = append(buf, runeTmp[:n]...)
+ }
+ }
+ return string(buf), nil
+}
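+
+// For example, given the double-quoted input text "a\nb", unescape replaces
+// the two-character sequence \n with a real newline; the same text prefixed
+// with 'r' (a raw literal) is returned with the backslash and 'n' untouched,
+// since raw literals skip escape decoding entirely.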
+
+// unescapeChar takes a string input and returns the following info:
+//
+// value - the escaped unicode rune at the front of the string.
+// encode - the value should be unicode-encoded
+// tail - the remainder of the input string.
+// err - error value, if the character could not be unescaped.
+//
+// When encode is true, the return value may still fit within a single byte,
+// but unicode encoding is attempted, which is more expensive than when the
+// value is known to self-represent as a single byte.
+//
+// If isBytes is set, unescape as a bytes literal so octal and hex escapes
+// represent byte values, not unicode code points.
+func unescapeChar(s string, isBytes bool) (value rune, encode bool, tail string, err error) {
+ // 1. Character is not an escape sequence.
+ switch c := s[0]; {
+ case c >= utf8.RuneSelf:
+ r, size := utf8.DecodeRuneInString(s)
+ return r, true, s[size:], nil
+ case c != '\\':
+ return rune(s[0]), false, s[1:], nil
+ }
+
+ // 2. Last character is the start of an escape sequence.
+ if len(s) <= 1 {
+ err = fmt.Errorf("unable to unescape string, found '\\' as last character")
+ return
+ }
+
+ c := s[1]
+ s = s[2:]
+ // 3. Common escape sequences shared with Google SQL
+ switch c {
+ case 'a':
+ value = '\a'
+ case 'b':
+ value = '\b'
+ case 'f':
+ value = '\f'
+ case 'n':
+ value = '\n'
+ case 'r':
+ value = '\r'
+ case 't':
+ value = '\t'
+ case 'v':
+ value = '\v'
+ case '\\':
+ value = '\\'
+ case '\'':
+ value = '\''
+ case '"':
+ value = '"'
+ case '`':
+ value = '`'
+ case '?':
+ value = '?'
+
+ // 4. Unicode escape sequences, reproduced from `strconv/quote.go`
+ case 'x', 'X', 'u', 'U':
+ n := 0
+ encode = true
+ switch c {
+ case 'x', 'X':
+ n = 2
+ encode = !isBytes
+ case 'u':
+ n = 4
+ if isBytes {
+ err = fmt.Errorf("unable to unescape string")
+ return
+ }
+ case 'U':
+ n = 8
+ if isBytes {
+ err = fmt.Errorf("unable to unescape string")
+ return
+ }
+ }
+ var v rune
+ if len(s) < n {
+ err = fmt.Errorf("unable to unescape string")
+ return
+ }
+ for j := 0; j < n; j++ {
+ x, ok := unhex(s[j])
+ if !ok {
+ err = fmt.Errorf("unable to unescape string")
+ return
+ }
+ v = v<<4 | x
+ }
+ s = s[n:]
+ if !isBytes && v > utf8.MaxRune {
+ err = fmt.Errorf("unable to unescape string")
+ return
+ }
+ value = v
+
+ // 5. Octal escape sequences, must be three digits \[0-3][0-7][0-7]
+ case '0', '1', '2', '3':
+ if len(s) < 2 {
+ err = fmt.Errorf("unable to unescape octal sequence in string")
+ return
+ }
+ v := rune(c - '0')
+ for j := 0; j < 2; j++ {
+ x := s[j]
+ if x < '0' || x > '7' {
+ err = fmt.Errorf("unable to unescape octal sequence in string")
+ return
+ }
+ v = v*8 + rune(x-'0')
+ }
+ if !isBytes && v > utf8.MaxRune {
+ err = fmt.Errorf("unable to unescape string")
+ return
+ }
+ value = v
+ s = s[2:]
+ encode = !isBytes
+
+ // Unknown escape sequence.
+ default:
+ err = fmt.Errorf("unable to unescape string")
+ }
+
+ tail = s
+ return
+}
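+
+// For example, decoding the escape \x41 followed by the text "BC" yields the
+// value 'A', encode=true, and tail "BC"; with isBytes=true the same escape
+// produces the single byte 0x41 with encode=false, since bytes literals are
+// never unicode-encoded.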
+
+func unhex(b byte) (rune, bool) {
+ c := rune(b)
+ switch {
+ case '0' <= c && c <= '9':
+ return c - '0', true
+ case 'a' <= c && c <= 'f':
+ return c - 'a' + 10, true
+ case 'A' <= c && c <= 'F':
+ return c - 'A' + 10, true
+ }
+ return 0, false
+}
+
+var (
+ newlineNormalizer = strings.NewReplacer("\r\n", "\n", "\r", "\n")
+)
diff --git a/vendor/github.com/google/cel-go/parser/unparser.go b/vendor/github.com/google/cel-go/parser/unparser.go
new file mode 100644
index 000000000..91cf72944
--- /dev/null
+++ b/vendor/github.com/google/cel-go/parser/unparser.go
@@ -0,0 +1,629 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package parser
+
+import (
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/google/cel-go/common/ast"
+ "github.com/google/cel-go/common/operators"
+ "github.com/google/cel-go/common/types"
+)
+
+// Unparse takes an input expression and source position information and generates a human-readable
+// expression.
+//
+// Note, unparsing an AST will often generate the same expression as was originally parsed, but some
+// formatting may be lost in translation, notably:
+//
+// - All quoted literals are double quoted.
+// - Byte literals are represented as octal escapes (same as Google SQL).
+// - Floating point values are converted to the smallest number of digits needed to represent the value.
+// - Spacing around punctuation marks may be lost.
+// - Parentheses will only be applied when they affect operator precedence.
+//
+// This function optionally takes in one or more UnparserOption to alter the unparsing behavior, such as
+// performing word wrapping on expressions.
+func Unparse(expr ast.Expr, info *ast.SourceInfo, opts ...UnparserOption) (string, error) {
+ unparserOpts := &unparserOption{
+ wrapOnColumn: defaultWrapOnColumn,
+ wrapAfterColumnLimit: defaultWrapAfterColumnLimit,
+ operatorsToWrapOn: defaultOperatorsToWrapOn,
+ }
+
+ var err error
+ for _, opt := range opts {
+ unparserOpts, err = opt(unparserOpts)
+ if err != nil {
+ return "", err
+ }
+ }
+
+ un := &unparser{
+ info: info,
+ options: unparserOpts,
+ }
+ err = un.visit(expr)
+ if err != nil {
+ return "", err
+ }
+ return un.str.String(), nil
+}
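+
+// A minimal illustrative call, assuming parsed is an *ast.AST produced by this
+// package's parser:
+//
+// out, err := Unparse(parsed.Expr(), parsed.SourceInfo(), WrapOnColumn(40))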
+
+// unparser visits an expression to reconstruct a human-readable string from an AST.
+type unparser struct {
+ str strings.Builder
+ info *ast.SourceInfo
+ options *unparserOption
+ lastWrappedIndex int
+}
+
+func (un *unparser) visit(expr ast.Expr) error {
+ if expr == nil {
+ return errors.New("unsupported expression")
+ }
+ visited, err := un.visitMaybeMacroCall(expr)
+ if visited || err != nil {
+ return err
+ }
+ switch expr.Kind() {
+ case ast.CallKind:
+ return un.visitCall(expr)
+ case ast.LiteralKind:
+ return un.visitConst(expr)
+ case ast.IdentKind:
+ return un.visitIdent(expr)
+ case ast.ListKind:
+ return un.visitList(expr)
+ case ast.MapKind:
+ return un.visitStructMap(expr)
+ case ast.SelectKind:
+ return un.visitSelect(expr)
+ case ast.StructKind:
+ return un.visitStructMsg(expr)
+ default:
+ return fmt.Errorf("unsupported expression: %v", expr)
+ }
+}
+
+func (un *unparser) visitCall(expr ast.Expr) error {
+ c := expr.AsCall()
+ fun := c.FunctionName()
+ switch fun {
+ // ternary operator
+ case operators.Conditional:
+ return un.visitCallConditional(expr)
+ // optional select operator
+ case operators.OptSelect:
+ return un.visitOptSelect(expr)
+ // index operator
+ case operators.Index:
+ return un.visitCallIndex(expr)
+ // optional index operator
+ case operators.OptIndex:
+ return un.visitCallOptIndex(expr)
+ // unary operators
+ case operators.LogicalNot, operators.Negate:
+ return un.visitCallUnary(expr)
+ // binary operators
+ case operators.Add,
+ operators.Divide,
+ operators.Equals,
+ operators.Greater,
+ operators.GreaterEquals,
+ operators.In,
+ operators.Less,
+ operators.LessEquals,
+ operators.LogicalAnd,
+ operators.LogicalOr,
+ operators.Modulo,
+ operators.Multiply,
+ operators.NotEquals,
+ operators.OldIn,
+ operators.Subtract:
+ return un.visitCallBinary(expr)
+ // standard function calls.
+ default:
+ return un.visitCallFunc(expr)
+ }
+}
+
+func (un *unparser) visitCallBinary(expr ast.Expr) error {
+ c := expr.AsCall()
+ fun := c.FunctionName()
+ args := c.Args()
+ lhs := args[0]
+ // add parens if the current operator is lower precedence than the lhs expr operator.
+ lhsParen := isComplexOperatorWithRespectTo(fun, lhs)
+ rhs := args[1]
+ // add parens if the current operator is lower precedence than the rhs expr operator,
+ // or the same precedence and the operator is left recursive.
+ rhsParen := isComplexOperatorWithRespectTo(fun, rhs)
+ if !rhsParen && isLeftRecursive(fun) {
+ rhsParen = isSamePrecedence(fun, rhs)
+ }
+ err := un.visitMaybeNested(lhs, lhsParen)
+ if err != nil {
+ return err
+ }
+ unmangled, found := operators.FindReverseBinaryOperator(fun)
+ if !found {
+ return fmt.Errorf("cannot unmangle operator: %s", fun)
+ }
+
+ un.writeOperatorWithWrapping(fun, unmangled)
+ return un.visitMaybeNested(rhs, rhsParen)
+}
+
+func (un *unparser) visitCallConditional(expr ast.Expr) error {
+ c := expr.AsCall()
+ args := c.Args()
+ // add parens if operand is a conditional itself.
+ nested := isSamePrecedence(operators.Conditional, args[0]) ||
+ isComplexOperator(args[0])
+ err := un.visitMaybeNested(args[0], nested)
+ if err != nil {
+ return err
+ }
+ un.writeOperatorWithWrapping(operators.Conditional, "?")
+
+ // add parens if operand is a conditional itself.
+ nested = isSamePrecedence(operators.Conditional, args[1]) ||
+ isComplexOperator(args[1])
+ err = un.visitMaybeNested(args[1], nested)
+ if err != nil {
+ return err
+ }
+
+ un.str.WriteString(" : ")
+ // add parens if operand is a conditional itself.
+ nested = isSamePrecedence(operators.Conditional, args[2]) ||
+ isComplexOperator(args[2])
+
+ return un.visitMaybeNested(args[2], nested)
+}
+
+func (un *unparser) visitCallFunc(expr ast.Expr) error {
+ c := expr.AsCall()
+ fun := c.FunctionName()
+ args := c.Args()
+ if c.IsMemberFunction() {
+ nested := isBinaryOrTernaryOperator(c.Target())
+ err := un.visitMaybeNested(c.Target(), nested)
+ if err != nil {
+ return err
+ }
+ un.str.WriteString(".")
+ }
+ un.str.WriteString(fun)
+ un.str.WriteString("(")
+ for i, arg := range args {
+ err := un.visit(arg)
+ if err != nil {
+ return err
+ }
+ if i < len(args)-1 {
+ un.str.WriteString(", ")
+ }
+ }
+ un.str.WriteString(")")
+ return nil
+}
+
+func (un *unparser) visitCallIndex(expr ast.Expr) error {
+ return un.visitCallIndexInternal(expr, "[")
+}
+
+func (un *unparser) visitCallOptIndex(expr ast.Expr) error {
+ return un.visitCallIndexInternal(expr, "[?")
+}
+
+func (un *unparser) visitCallIndexInternal(expr ast.Expr, op string) error {
+ c := expr.AsCall()
+ args := c.Args()
+ nested := isBinaryOrTernaryOperator(args[0])
+ err := un.visitMaybeNested(args[0], nested)
+ if err != nil {
+ return err
+ }
+ un.str.WriteString(op)
+ err = un.visit(args[1])
+ if err != nil {
+ return err
+ }
+ un.str.WriteString("]")
+ return nil
+}
+
+func (un *unparser) visitCallUnary(expr ast.Expr) error {
+ c := expr.AsCall()
+ fun := c.FunctionName()
+ args := c.Args()
+ unmangled, found := operators.FindReverse(fun)
+ if !found {
+ return fmt.Errorf("cannot unmangle operator: %s", fun)
+ }
+ un.str.WriteString(unmangled)
+ nested := isComplexOperator(args[0])
+ return un.visitMaybeNested(args[0], nested)
+}
+
+func (un *unparser) visitConst(expr ast.Expr) error {
+ val := expr.AsLiteral()
+ switch val := val.(type) {
+ case types.Bool:
+ un.str.WriteString(strconv.FormatBool(bool(val)))
+ case types.Bytes:
+ // bytes constants are surrounded with b""
+ un.str.WriteString(`b"`)
+ un.str.WriteString(bytesToOctets([]byte(val)))
+ un.str.WriteString(`"`)
+ case types.Double:
+ // represent the float using the minimum required digits
+ d := strconv.FormatFloat(float64(val), 'g', -1, 64)
+ un.str.WriteString(d)
+ if !strings.Contains(d, ".") {
+ un.str.WriteString(".0")
+ }
+ case types.Int:
+ i := strconv.FormatInt(int64(val), 10)
+ un.str.WriteString(i)
+ case types.Null:
+ un.str.WriteString("null")
+ case types.String:
+ // strings will be double quoted with quotes escaped.
+ un.str.WriteString(strconv.Quote(string(val)))
+ case types.Uint:
+ // uint literals have a 'u' suffix.
+ ui := strconv.FormatUint(uint64(val), 10)
+ un.str.WriteString(ui)
+ un.str.WriteString("u")
+ default:
+ return fmt.Errorf("unsupported constant: %v", expr)
+ }
+ return nil
+}
+
+func (un *unparser) visitIdent(expr ast.Expr) error {
+ un.str.WriteString(expr.AsIdent())
+ return nil
+}
+
+func (un *unparser) visitList(expr ast.Expr) error {
+ l := expr.AsList()
+ elems := l.Elements()
+ optIndices := make(map[int]bool, len(elems))
+ for _, idx := range l.OptionalIndices() {
+ optIndices[int(idx)] = true
+ }
+ un.str.WriteString("[")
+ for i, elem := range elems {
+ if optIndices[i] {
+ un.str.WriteString("?")
+ }
+ err := un.visit(elem)
+ if err != nil {
+ return err
+ }
+ if i < len(elems)-1 {
+ un.str.WriteString(", ")
+ }
+ }
+ un.str.WriteString("]")
+ return nil
+}
+
+func (un *unparser) visitOptSelect(expr ast.Expr) error {
+ c := expr.AsCall()
+ args := c.Args()
+ operand := args[0]
+ field := args[1].AsLiteral().(types.String)
+ return un.visitSelectInternal(operand, false, ".?", string(field))
+}
+
+func (un *unparser) visitSelect(expr ast.Expr) error {
+ sel := expr.AsSelect()
+ return un.visitSelectInternal(sel.Operand(), sel.IsTestOnly(), ".", sel.FieldName())
+}
+
+func (un *unparser) visitSelectInternal(operand ast.Expr, testOnly bool, op string, field string) error {
+ // handle the case when the select expression was generated by the has() macro.
+ if testOnly {
+ un.str.WriteString("has(")
+ }
+ nested := !testOnly && isBinaryOrTernaryOperator(operand)
+ err := un.visitMaybeNested(operand, nested)
+ if err != nil {
+ return err
+ }
+ un.str.WriteString(op)
+ un.str.WriteString(field)
+ if testOnly {
+ un.str.WriteString(")")
+ }
+ return nil
+}
+
+func (un *unparser) visitStructMsg(expr ast.Expr) error {
+ m := expr.AsStruct()
+ fields := m.Fields()
+ un.str.WriteString(m.TypeName())
+ un.str.WriteString("{")
+ for i, f := range fields {
+ field := f.AsStructField()
+ f := field.Name()
+ if field.IsOptional() {
+ un.str.WriteString("?")
+ }
+ un.str.WriteString(f)
+ un.str.WriteString(": ")
+ v := field.Value()
+ err := un.visit(v)
+ if err != nil {
+ return err
+ }
+ if i < len(fields)-1 {
+ un.str.WriteString(", ")
+ }
+ }
+ un.str.WriteString("}")
+ return nil
+}
+
+func (un *unparser) visitStructMap(expr ast.Expr) error {
+ m := expr.AsMap()
+ entries := m.Entries()
+ un.str.WriteString("{")
+ for i, e := range entries {
+ entry := e.AsMapEntry()
+ k := entry.Key()
+ if entry.IsOptional() {
+ un.str.WriteString("?")
+ }
+ err := un.visit(k)
+ if err != nil {
+ return err
+ }
+ un.str.WriteString(": ")
+ v := entry.Value()
+ err = un.visit(v)
+ if err != nil {
+ return err
+ }
+ if i < len(entries)-1 {
+ un.str.WriteString(", ")
+ }
+ }
+ un.str.WriteString("}")
+ return nil
+}
+
+func (un *unparser) visitMaybeMacroCall(expr ast.Expr) (bool, error) {
+ call, found := un.info.GetMacroCall(expr.ID())
+ if !found {
+ return false, nil
+ }
+ return true, un.visit(call)
+}
+
+func (un *unparser) visitMaybeNested(expr ast.Expr, nested bool) error {
+ if nested {
+ un.str.WriteString("(")
+ }
+ err := un.visit(expr)
+ if err != nil {
+ return err
+ }
+ if nested {
+ un.str.WriteString(")")
+ }
+ return nil
+}
+
+// isLeftRecursive indicates whether the parser resolves the call in a left-recursive manner, as
+// this affects how parentheses change the order of operations in the AST.
+func isLeftRecursive(op string) bool {
+ return op != operators.LogicalAnd && op != operators.LogicalOr
+}
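+
+// For example, subtraction is left-recursive: a - b - c parses as (a - b) - c,
+// so unparsing a - (b - c) must parenthesize the right-hand side even though
+// both operators share the same precedence.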
+
+// isSamePrecedence indicates whether the precedence of the input operator is the same as the
+// precedence of the (possible) operation represented in the input Expr.
+//
+// If the expr is not a Call, the result is false.
+func isSamePrecedence(op string, expr ast.Expr) bool {
+ if expr.Kind() != ast.CallKind {
+ return false
+ }
+ c := expr.AsCall()
+ other := c.FunctionName()
+ return operators.Precedence(op) == operators.Precedence(other)
+}
+
+// isLowerPrecedence indicates whether the precedence of the input operator is lower precedence
+// than the (possible) operation represented in the input Expr.
+//
+// If the expr is not a Call, the result is false.
+func isLowerPrecedence(op string, expr ast.Expr) bool {
+ c := expr.AsCall()
+ other := c.FunctionName()
+ return operators.Precedence(op) < operators.Precedence(other)
+}
+
+// Indicates whether the expr is a complex operator, i.e., a call expression
+// with 2 or more arguments.
+func isComplexOperator(expr ast.Expr) bool {
+ if expr.Kind() == ast.CallKind && len(expr.AsCall().Args()) >= 2 {
+ return true
+ }
+ return false
+}
+
+// Indicates whether it is a complex operation compared to another.
+// expr is *not* considered complex if it is not a call expression or has
+// fewer than two arguments, or if it has a higher precedence than op.
+func isComplexOperatorWithRespectTo(op string, expr ast.Expr) bool {
+ if expr.Kind() != ast.CallKind || len(expr.AsCall().Args()) < 2 {
+ return false
+ }
+ return isLowerPrecedence(op, expr)
+}
+
+// Indicates whether this is a binary or ternary operator.
+func isBinaryOrTernaryOperator(expr ast.Expr) bool {
+ if expr.Kind() != ast.CallKind || len(expr.AsCall().Args()) < 2 {
+ return false
+ }
+ _, isBinaryOp := operators.FindReverseBinaryOperator(expr.AsCall().FunctionName())
+ return isBinaryOp || isSamePrecedence(operators.Conditional, expr)
+}
+
+// bytesToOctets converts a byte sequence to a string using a three-digit octal
+// escape per byte.
+func bytesToOctets(byteVal []byte) string {
+ var b strings.Builder
+ for _, c := range byteVal {
+ fmt.Fprintf(&b, "\\%03o", c)
+ }
+ return b.String()
+}
+
+// writeOperatorWithWrapping outputs the operator and inserts a newline for operators configured
+// in the unparser options.
+func (un *unparser) writeOperatorWithWrapping(fun string, unmangled string) bool {
+ _, wrapOperatorExists := un.options.operatorsToWrapOn[fun]
+ lineLength := un.str.Len() - un.lastWrappedIndex + len(fun)
+
+ if wrapOperatorExists && lineLength >= un.options.wrapOnColumn {
+ un.lastWrappedIndex = un.str.Len()
+ // wrapAfterColumnLimit flag dictates whether the newline is placed
+ // before or after the operator
+ if un.options.wrapAfterColumnLimit {
+ // Input: a && b
+ // Output: a &&\nb
+ un.str.WriteString(" ")
+ un.str.WriteString(unmangled)
+ un.str.WriteString("\n")
+ } else {
+ // Input: a && b
+ // Output: a\n&& b
+ un.str.WriteString("\n")
+ un.str.WriteString(unmangled)
+ un.str.WriteString(" ")
+ }
+ return true
+ }
+ un.str.WriteString(" ")
+ un.str.WriteString(unmangled)
+ un.str.WriteString(" ")
+ return false
+}
+
+// Default values for the unparser options.
+var (
+ defaultWrapOnColumn = 80
+ defaultWrapAfterColumnLimit = true
+ defaultOperatorsToWrapOn = map[string]bool{
+ operators.LogicalAnd: true,
+ operators.LogicalOr: true,
+ }
+)
+
+// UnparserOption is a functional option for configuring the output formatting
+// of the Unparse function.
+type UnparserOption func(*unparserOption) (*unparserOption, error)
+
+// Internal representation of the UnparserOption type
+type unparserOption struct {
+ wrapOnColumn int
+ operatorsToWrapOn map[string]bool
+ wrapAfterColumnLimit bool
+}
+
+// WrapOnColumn wraps the output expression when its string length exceeds a specified limit
+// for the operators set by the WrapOnOperators function; by default, "&&" and "||" are wrapped.
+//
+// Example usage:
+//
+// Unparse(expr, sourceInfo, WrapOnColumn(40), WrapOnOperators(Operators.LogicalAnd))
+//
+// This will insert a newline immediately after the logical AND operator for the below example input:
+//
+// Input:
+// 'my-principal-group' in request.auth.claims && request.auth.claims.iat > now - duration('5m')
+//
+// Output:
+// 'my-principal-group' in request.auth.claims &&
+// request.auth.claims.iat > now - duration('5m')
+func WrapOnColumn(col int) UnparserOption {
+ return func(opt *unparserOption) (*unparserOption, error) {
+ if col < 1 {
+ return nil, fmt.Errorf("Invalid unparser option. Wrap column value must be greater than or equal to 1. Got %v instead", col)
+ }
+ opt.wrapOnColumn = col
+ return opt, nil
+ }
+}
+
+// WrapOnOperators specifies the operators on which to perform word wrapping when the output
+// expression's string length exceeds the column limit set by the WrapOnColumn function.
+//
+// Word wrapping is supported on non-unary symbolic operators. Refer to operators.go for the full list.
+//
+// This will replace any previously supplied operators instead of merging them.
+func WrapOnOperators(symbols ...string) UnparserOption {
+ return func(opt *unparserOption) (*unparserOption, error) {
+ opt.operatorsToWrapOn = make(map[string]bool)
+ for _, symbol := range symbols {
+ _, found := operators.FindReverse(symbol)
+ if !found {
+ return nil, fmt.Errorf("Invalid unparser option. Unsupported operator: %s", symbol)
+ }
+ arity := operators.Arity(symbol)
+ if arity < 2 {
+ return nil, fmt.Errorf("Invalid unparser option. Unary operators are unsupported: %s", symbol)
+ }
+
+ opt.operatorsToWrapOn[symbol] = true
+ }
+
+ return opt, nil
+ }
+}
+
+// WrapAfterColumnLimit dictates whether to insert a newline before or after the specified operator
+// when word wrapping is performed.
+//
+// Example usage:
+//
+// Unparse(expr, sourceInfo, WrapOnColumn(40), WrapOnOperators(Operators.LogicalAnd), WrapAfterColumnLimit(false))
+//
+// This will insert a newline immediately before the logical AND operator for the below example input, ensuring
+// that the length of a line never exceeds the specified column limit:
+//
+// Input:
+// 'my-principal-group' in request.auth.claims && request.auth.claims.iat > now - duration('5m')
+//
+// Output:
+// 'my-principal-group' in request.auth.claims
+// && request.auth.claims.iat > now - duration('5m')
+func WrapAfterColumnLimit(wrapAfter bool) UnparserOption {
+ return func(opt *unparserOption) (*unparserOption, error) {
+ opt.wrapAfterColumnLimit = wrapAfter
+ return opt, nil
+ }
+}
diff --git a/vendor/github.com/google/pprof/AUTHORS b/vendor/github.com/google/pprof/AUTHORS
new file mode 100644
index 000000000..fd736cb1c
--- /dev/null
+++ b/vendor/github.com/google/pprof/AUTHORS
@@ -0,0 +1,7 @@
+# This is the official list of pprof authors for copyright purposes.
+# This file is distinct from the CONTRIBUTORS files.
+# See the latter for an explanation.
+# Names should be added to this file as:
+# Name or Organization
+# The email address is not required for organizations.
+Google Inc.
\ No newline at end of file
diff --git a/vendor/github.com/google/pprof/CONTRIBUTORS b/vendor/github.com/google/pprof/CONTRIBUTORS
new file mode 100644
index 000000000..8c8c37d2c
--- /dev/null
+++ b/vendor/github.com/google/pprof/CONTRIBUTORS
@@ -0,0 +1,16 @@
+# People who have agreed to one of the CLAs and can contribute patches.
+# The AUTHORS file lists the copyright holders; this file
+# lists people. For example, Google employees are listed here
+# but not in AUTHORS, because Google holds the copyright.
+#
+# https://developers.google.com/open-source/cla/individual
+# https://developers.google.com/open-source/cla/corporate
+#
+# Names should be added to this file as:
+# Name
+Raul Silvera
+Tipp Moseley
+Hyoun Kyu Cho
+Martin Spier
+Taco de Wolff
+Andrew Hunter
diff --git a/vendor/github.com/google/pprof/LICENSE b/vendor/github.com/google/pprof/LICENSE
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/google/pprof/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/google/pprof/profile/encode.go b/vendor/github.com/google/pprof/profile/encode.go
new file mode 100644
index 000000000..8ce9d3cf3
--- /dev/null
+++ b/vendor/github.com/google/pprof/profile/encode.go
@@ -0,0 +1,596 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package profile
+
+import (
+ "errors"
+ "sort"
+ "strings"
+)
+
+func (p *Profile) decoder() []decoder {
+ return profileDecoder
+}
+
+// preEncode populates the unexported fields to be used by encode
+// (with suffix X) from the corresponding exported fields. The
+// exported fields are cleared up to facilitate testing.
+func (p *Profile) preEncode() {
+ strings := make(map[string]int)
+ addString(strings, "")
+
+ for _, st := range p.SampleType {
+ st.typeX = addString(strings, st.Type)
+ st.unitX = addString(strings, st.Unit)
+ }
+
+ for _, s := range p.Sample {
+ s.labelX = nil
+ var keys []string
+ for k := range s.Label {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ for _, k := range keys {
+ vs := s.Label[k]
+ for _, v := range vs {
+ s.labelX = append(s.labelX,
+ label{
+ keyX: addString(strings, k),
+ strX: addString(strings, v),
+ },
+ )
+ }
+ }
+ var numKeys []string
+ for k := range s.NumLabel {
+ numKeys = append(numKeys, k)
+ }
+ sort.Strings(numKeys)
+ for _, k := range numKeys {
+ keyX := addString(strings, k)
+ vs := s.NumLabel[k]
+ units := s.NumUnit[k]
+ for i, v := range vs {
+ var unitX int64
+ if len(units) != 0 {
+ unitX = addString(strings, units[i])
+ }
+ s.labelX = append(s.labelX,
+ label{
+ keyX: keyX,
+ numX: v,
+ unitX: unitX,
+ },
+ )
+ }
+ }
+ s.locationIDX = make([]uint64, len(s.Location))
+ for i, loc := range s.Location {
+ s.locationIDX[i] = loc.ID
+ }
+ }
+
+ for _, m := range p.Mapping {
+ m.fileX = addString(strings, m.File)
+ m.buildIDX = addString(strings, m.BuildID)
+ }
+
+ for _, l := range p.Location {
+ for i, ln := range l.Line {
+ if ln.Function != nil {
+ l.Line[i].functionIDX = ln.Function.ID
+ } else {
+ l.Line[i].functionIDX = 0
+ }
+ }
+ if l.Mapping != nil {
+ l.mappingIDX = l.Mapping.ID
+ } else {
+ l.mappingIDX = 0
+ }
+ }
+ for _, f := range p.Function {
+ f.nameX = addString(strings, f.Name)
+ f.systemNameX = addString(strings, f.SystemName)
+ f.filenameX = addString(strings, f.Filename)
+ }
+
+ p.dropFramesX = addString(strings, p.DropFrames)
+ p.keepFramesX = addString(strings, p.KeepFrames)
+
+ if pt := p.PeriodType; pt != nil {
+ pt.typeX = addString(strings, pt.Type)
+ pt.unitX = addString(strings, pt.Unit)
+ }
+
+ p.commentX = nil
+ for _, c := range p.Comments {
+ p.commentX = append(p.commentX, addString(strings, c))
+ }
+
+ p.defaultSampleTypeX = addString(strings, p.DefaultSampleType)
+ p.docURLX = addString(strings, p.DocURL)
+
+ p.stringTable = make([]string, len(strings))
+ for s, i := range strings {
+ p.stringTable[i] = s
+ }
+}
+
+func (p *Profile) encode(b *buffer) {
+ for _, x := range p.SampleType {
+ encodeMessage(b, 1, x)
+ }
+ for _, x := range p.Sample {
+ encodeMessage(b, 2, x)
+ }
+ for _, x := range p.Mapping {
+ encodeMessage(b, 3, x)
+ }
+ for _, x := range p.Location {
+ encodeMessage(b, 4, x)
+ }
+ for _, x := range p.Function {
+ encodeMessage(b, 5, x)
+ }
+ encodeStrings(b, 6, p.stringTable)
+ encodeInt64Opt(b, 7, p.dropFramesX)
+ encodeInt64Opt(b, 8, p.keepFramesX)
+ encodeInt64Opt(b, 9, p.TimeNanos)
+ encodeInt64Opt(b, 10, p.DurationNanos)
+ if pt := p.PeriodType; pt != nil && (pt.typeX != 0 || pt.unitX != 0) {
+ encodeMessage(b, 11, p.PeriodType)
+ }
+ encodeInt64Opt(b, 12, p.Period)
+ encodeInt64s(b, 13, p.commentX)
+ encodeInt64(b, 14, p.defaultSampleTypeX)
+ encodeInt64Opt(b, 15, p.docURLX)
+}
+
+var profileDecoder = []decoder{
+ nil, // 0
+ // repeated ValueType sample_type = 1
+ func(b *buffer, m message) error {
+ x := new(ValueType)
+ pp := m.(*Profile)
+ pp.SampleType = append(pp.SampleType, x)
+ return decodeMessage(b, x)
+ },
+ // repeated Sample sample = 2
+ func(b *buffer, m message) error {
+ x := new(Sample)
+ pp := m.(*Profile)
+ pp.Sample = append(pp.Sample, x)
+ return decodeMessage(b, x)
+ },
+ // repeated Mapping mapping = 3
+ func(b *buffer, m message) error {
+ x := new(Mapping)
+ pp := m.(*Profile)
+ pp.Mapping = append(pp.Mapping, x)
+ return decodeMessage(b, x)
+ },
+ // repeated Location location = 4
+ func(b *buffer, m message) error {
+ x := new(Location)
+ x.Line = b.tmpLines[:0] // Use shared space temporarily
+ pp := m.(*Profile)
+ pp.Location = append(pp.Location, x)
+ err := decodeMessage(b, x)
+ b.tmpLines = x.Line[:0]
+ // Copy to shrink size and detach from shared space.
+ x.Line = append([]Line(nil), x.Line...)
+ return err
+ },
+ // repeated Function function = 5
+ func(b *buffer, m message) error {
+ x := new(Function)
+ pp := m.(*Profile)
+ pp.Function = append(pp.Function, x)
+ return decodeMessage(b, x)
+ },
+ // repeated string string_table = 6
+ func(b *buffer, m message) error {
+ err := decodeStrings(b, &m.(*Profile).stringTable)
+ if err != nil {
+ return err
+ }
+ if m.(*Profile).stringTable[0] != "" {
+ return errors.New("string_table[0] must be ''")
+ }
+ return nil
+ },
+ // int64 drop_frames = 7
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).dropFramesX) },
+ // int64 keep_frames = 8
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).keepFramesX) },
+ // int64 time_nanos = 9
+ func(b *buffer, m message) error {
+ if m.(*Profile).TimeNanos != 0 {
+ return errConcatProfile
+ }
+ return decodeInt64(b, &m.(*Profile).TimeNanos)
+ },
+ // int64 duration_nanos = 10
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).DurationNanos) },
+ // ValueType period_type = 11
+ func(b *buffer, m message) error {
+ x := new(ValueType)
+ pp := m.(*Profile)
+ pp.PeriodType = x
+ return decodeMessage(b, x)
+ },
+ // int64 period = 12
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).Period) },
+ // repeated int64 comment = 13
+ func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Profile).commentX) },
+ // int64 default_sample_type = 14
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).defaultSampleTypeX) },
+ // int64 doc_url = 15
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).docURLX) },
+}
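+
+// Each decoder slice in this file is indexed by protobuf field number,
+// which is why entry 0 is always nil and each entry's comment names
+// the profile.proto field it handles.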
+
+// postDecode takes the unexported fields populated by decode (with
+// suffix X) and populates the corresponding exported fields.
+// The unexported fields are cleared up to facilitate testing.
+func (p *Profile) postDecode() error {
+ var err error
+ mappings := make(map[uint64]*Mapping, len(p.Mapping))
+ mappingIds := make([]*Mapping, len(p.Mapping)+1)
+ for _, m := range p.Mapping {
+ m.File, err = getString(p.stringTable, &m.fileX, err)
+ m.BuildID, err = getString(p.stringTable, &m.buildIDX, err)
+ if m.ID < uint64(len(mappingIds)) {
+ mappingIds[m.ID] = m
+ } else {
+ mappings[m.ID] = m
+ }
+
+ // If this is a main Linux kernel mapping with a relocation symbol suffix
+ // ("[kernel.kallsyms]_text"), extract said suffix.
+ // It is fairly hacky to handle at this level, but the alternatives appear even worse.
+ const prefix = "[kernel.kallsyms]"
+ if strings.HasPrefix(m.File, prefix) {
+ m.KernelRelocationSymbol = m.File[len(prefix):]
+ }
+ }
+
+ functions := make(map[uint64]*Function, len(p.Function))
+ functionIds := make([]*Function, len(p.Function)+1)
+ for _, f := range p.Function {
+ f.Name, err = getString(p.stringTable, &f.nameX, err)
+ f.SystemName, err = getString(p.stringTable, &f.systemNameX, err)
+ f.Filename, err = getString(p.stringTable, &f.filenameX, err)
+ if f.ID < uint64(len(functionIds)) {
+ functionIds[f.ID] = f
+ } else {
+ functions[f.ID] = f
+ }
+ }
+
+ locations := make(map[uint64]*Location, len(p.Location))
+ locationIds := make([]*Location, len(p.Location)+1)
+ for _, l := range p.Location {
+ if id := l.mappingIDX; id < uint64(len(mappingIds)) {
+ l.Mapping = mappingIds[id]
+ } else {
+ l.Mapping = mappings[id]
+ }
+ l.mappingIDX = 0
+ for i, ln := range l.Line {
+ if id := ln.functionIDX; id != 0 {
+ l.Line[i].functionIDX = 0
+ if id < uint64(len(functionIds)) {
+ l.Line[i].Function = functionIds[id]
+ } else {
+ l.Line[i].Function = functions[id]
+ }
+ }
+ }
+ if l.ID < uint64(len(locationIds)) {
+ locationIds[l.ID] = l
+ } else {
+ locations[l.ID] = l
+ }
+ }
+
+ for _, st := range p.SampleType {
+ st.Type, err = getString(p.stringTable, &st.typeX, err)
+ st.Unit, err = getString(p.stringTable, &st.unitX, err)
+ }
+
+ // Pre-allocate space for all locations.
+ numLocations := 0
+ for _, s := range p.Sample {
+ numLocations += len(s.locationIDX)
+ }
+ locBuffer := make([]*Location, numLocations)
+
+ for _, s := range p.Sample {
+ if len(s.labelX) > 0 {
+ labels := make(map[string][]string, len(s.labelX))
+ numLabels := make(map[string][]int64, len(s.labelX))
+ numUnits := make(map[string][]string, len(s.labelX))
+ for _, l := range s.labelX {
+ var key, value string
+ key, err = getString(p.stringTable, &l.keyX, err)
+ if l.strX != 0 {
+ value, err = getString(p.stringTable, &l.strX, err)
+ labels[key] = append(labels[key], value)
+ } else if l.numX != 0 || l.unitX != 0 {
+ numValues := numLabels[key]
+ units := numUnits[key]
+ if l.unitX != 0 {
+ var unit string
+ unit, err = getString(p.stringTable, &l.unitX, err)
+ units = padStringArray(units, len(numValues))
+ numUnits[key] = append(units, unit)
+ }
+ numLabels[key] = append(numLabels[key], l.numX)
+ }
+ }
+ if len(labels) > 0 {
+ s.Label = labels
+ }
+ if len(numLabels) > 0 {
+ s.NumLabel = numLabels
+ for key, units := range numUnits {
+ if len(units) > 0 {
+ numUnits[key] = padStringArray(units, len(numLabels[key]))
+ }
+ }
+ s.NumUnit = numUnits
+ }
+ }
+
+ s.Location = locBuffer[:len(s.locationIDX)]
+ locBuffer = locBuffer[len(s.locationIDX):]
+ for i, lid := range s.locationIDX {
+ if lid < uint64(len(locationIds)) {
+ s.Location[i] = locationIds[lid]
+ } else {
+ s.Location[i] = locations[lid]
+ }
+ }
+ s.locationIDX = nil
+ }
+
+ p.DropFrames, err = getString(p.stringTable, &p.dropFramesX, err)
+ p.KeepFrames, err = getString(p.stringTable, &p.keepFramesX, err)
+
+ if pt := p.PeriodType; pt == nil {
+ p.PeriodType = &ValueType{}
+ }
+
+ if pt := p.PeriodType; pt != nil {
+ pt.Type, err = getString(p.stringTable, &pt.typeX, err)
+ pt.Unit, err = getString(p.stringTable, &pt.unitX, err)
+ }
+
+ for _, i := range p.commentX {
+ var c string
+ c, err = getString(p.stringTable, &i, err)
+ p.Comments = append(p.Comments, c)
+ }
+
+ p.commentX = nil
+ p.DefaultSampleType, err = getString(p.stringTable, &p.defaultSampleTypeX, err)
+ p.DocURL, err = getString(p.stringTable, &p.docURLX, err)
+ p.stringTable = nil
+ return err
+}
+
+// padStringArray pads arr with enough empty strings to make arr
+// length l when arr's length is less than l.
+func padStringArray(arr []string, l int) []string {
+ if l <= len(arr) {
+ return arr
+ }
+ return append(arr, make([]string, l-len(arr))...)
+}
+
+func (p *ValueType) decoder() []decoder {
+ return valueTypeDecoder
+}
+
+func (p *ValueType) encode(b *buffer) {
+ encodeInt64Opt(b, 1, p.typeX)
+ encodeInt64Opt(b, 2, p.unitX)
+}
+
+var valueTypeDecoder = []decoder{
+ nil, // 0
+ // optional int64 type = 1
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*ValueType).typeX) },
+ // optional int64 unit = 2
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*ValueType).unitX) },
+}
+
+func (p *Sample) decoder() []decoder {
+ return sampleDecoder
+}
+
+func (p *Sample) encode(b *buffer) {
+ encodeUint64s(b, 1, p.locationIDX)
+ encodeInt64s(b, 2, p.Value)
+ for _, x := range p.labelX {
+ encodeMessage(b, 3, x)
+ }
+}
+
+var sampleDecoder = []decoder{
+ nil, // 0
+ // repeated uint64 location = 1
+ func(b *buffer, m message) error { return decodeUint64s(b, &m.(*Sample).locationIDX) },
+ // repeated int64 value = 2
+ func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Sample).Value) },
+ // repeated Label label = 3
+ func(b *buffer, m message) error {
+ s := m.(*Sample)
+ n := len(s.labelX)
+ s.labelX = append(s.labelX, label{})
+ return decodeMessage(b, &s.labelX[n])
+ },
+}
+
+func (p label) decoder() []decoder {
+ return labelDecoder
+}
+
+func (p label) encode(b *buffer) {
+ encodeInt64Opt(b, 1, p.keyX)
+ encodeInt64Opt(b, 2, p.strX)
+ encodeInt64Opt(b, 3, p.numX)
+ encodeInt64Opt(b, 4, p.unitX)
+}
+
+var labelDecoder = []decoder{
+ nil, // 0
+ // optional int64 key = 1
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).keyX) },
+ // optional int64 str = 2
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).strX) },
+ // optional int64 num = 3
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).numX) },
+ // optional int64 num_unit = 4
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).unitX) },
+}
+
+func (p *Mapping) decoder() []decoder {
+ return mappingDecoder
+}
+
+func (p *Mapping) encode(b *buffer) {
+ encodeUint64Opt(b, 1, p.ID)
+ encodeUint64Opt(b, 2, p.Start)
+ encodeUint64Opt(b, 3, p.Limit)
+ encodeUint64Opt(b, 4, p.Offset)
+ encodeInt64Opt(b, 5, p.fileX)
+ encodeInt64Opt(b, 6, p.buildIDX)
+ encodeBoolOpt(b, 7, p.HasFunctions)
+ encodeBoolOpt(b, 8, p.HasFilenames)
+ encodeBoolOpt(b, 9, p.HasLineNumbers)
+ encodeBoolOpt(b, 10, p.HasInlineFrames)
+}
+
+var mappingDecoder = []decoder{
+ nil, // 0
+ func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).ID) }, // optional uint64 id = 1
+ func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Start) }, // optional uint64 memory_offset = 2
+ func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Limit) }, // optional uint64 memory_limit = 3
+ func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Offset) }, // optional uint64 file_offset = 4
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Mapping).fileX) }, // optional int64 filename = 5
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Mapping).buildIDX) }, // optional int64 build_id = 6
+ func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasFunctions) }, // optional bool has_functions = 7
+ func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasFilenames) }, // optional bool has_filenames = 8
+ func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasLineNumbers) }, // optional bool has_line_numbers = 9
+ func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasInlineFrames) }, // optional bool has_inline_frames = 10
+}
+
+func (p *Location) decoder() []decoder {
+ return locationDecoder
+}
+
+func (p *Location) encode(b *buffer) {
+ encodeUint64Opt(b, 1, p.ID)
+ encodeUint64Opt(b, 2, p.mappingIDX)
+ encodeUint64Opt(b, 3, p.Address)
+ for i := range p.Line {
+ encodeMessage(b, 4, &p.Line[i])
+ }
+ encodeBoolOpt(b, 5, p.IsFolded)
+}
+
+var locationDecoder = []decoder{
+ nil, // 0
+ func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).ID) }, // optional uint64 id = 1;
+ func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).mappingIDX) }, // optional uint64 mapping_id = 2;
+ func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).Address) }, // optional uint64 address = 3;
+ func(b *buffer, m message) error { // repeated Line line = 4
+ pp := m.(*Location)
+ n := len(pp.Line)
+ pp.Line = append(pp.Line, Line{})
+ return decodeMessage(b, &pp.Line[n])
+ },
+ func(b *buffer, m message) error { return decodeBool(b, &m.(*Location).IsFolded) }, // optional bool is_folded = 5;
+}
+
+func (p *Line) decoder() []decoder {
+ return lineDecoder
+}
+
+func (p *Line) encode(b *buffer) {
+ encodeUint64Opt(b, 1, p.functionIDX)
+ encodeInt64Opt(b, 2, p.Line)
+ encodeInt64Opt(b, 3, p.Column)
+}
+
+var lineDecoder = []decoder{
+ nil, // 0
+ // optional uint64 function_id = 1
+ func(b *buffer, m message) error { return decodeUint64(b, &m.(*Line).functionIDX) },
+ // optional int64 line = 2
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Line).Line) },
+ // optional int64 column = 3
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Line).Column) },
+}
+
+func (p *Function) decoder() []decoder {
+ return functionDecoder
+}
+
+func (p *Function) encode(b *buffer) {
+ encodeUint64Opt(b, 1, p.ID)
+ encodeInt64Opt(b, 2, p.nameX)
+ encodeInt64Opt(b, 3, p.systemNameX)
+ encodeInt64Opt(b, 4, p.filenameX)
+ encodeInt64Opt(b, 5, p.StartLine)
+}
+
+var functionDecoder = []decoder{
+ nil, // 0
+ // optional uint64 id = 1
+ func(b *buffer, m message) error { return decodeUint64(b, &m.(*Function).ID) },
+ // optional int64 function_name = 2
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).nameX) },
+ // optional int64 function_system_name = 3
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).systemNameX) },
+ // optional int64 filename = 4
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).filenameX) },
+ // optional int64 start_line = 5
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).StartLine) },
+}
+
+func addString(strings map[string]int, s string) int64 {
+ i, ok := strings[s]
+ if !ok {
+ i = len(strings)
+ strings[s] = i
+ }
+ return int64(i)
+}
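+
+// For illustration, interning proceeds like so; duplicates reuse their
+// existing index and index 0 is always the empty string:
+//
+//	m := map[string]int{}
+//	addString(m, "")    // 0
+//	addString(m, "cpu") // 1
+//	addString(m, "cpu") // 1 again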
+
+func getString(strings []string, strng *int64, err error) (string, error) {
+ if err != nil {
+ return "", err
+ }
+ s := int(*strng)
+ if s < 0 || s >= len(strings) {
+ return "", errMalformed
+ }
+ *strng = 0
+ return strings[s], nil
+}
diff --git a/vendor/github.com/google/pprof/profile/filter.go b/vendor/github.com/google/pprof/profile/filter.go
new file mode 100644
index 000000000..c794b9390
--- /dev/null
+++ b/vendor/github.com/google/pprof/profile/filter.go
@@ -0,0 +1,274 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package profile
+
+// Implements methods to filter samples from profiles.
+
+import "regexp"
+
+// FilterSamplesByName filters the samples in a profile and only keeps
+// samples where at least one frame matches focus but none match ignore.
+// Frames matching hide are dropped, and when show is set only matching
+// frames are kept. Each returned flag reports whether the corresponding
+// regexp matched at least one sample.
+func (p *Profile) FilterSamplesByName(focus, ignore, hide, show *regexp.Regexp) (fm, im, hm, hnm bool) {
+ if focus == nil && ignore == nil && hide == nil && show == nil {
+ fm = true // Missing focus implies a match
+ return
+ }
+ focusOrIgnore := make(map[uint64]bool)
+ hidden := make(map[uint64]bool)
+ for _, l := range p.Location {
+ if ignore != nil && l.matchesName(ignore) {
+ im = true
+ focusOrIgnore[l.ID] = false
+ } else if focus == nil || l.matchesName(focus) {
+ fm = true
+ focusOrIgnore[l.ID] = true
+ }
+
+ if hide != nil && l.matchesName(hide) {
+ hm = true
+ l.Line = l.unmatchedLines(hide)
+ if len(l.Line) == 0 {
+ hidden[l.ID] = true
+ }
+ }
+ if show != nil {
+ l.Line = l.matchedLines(show)
+ if len(l.Line) == 0 {
+ hidden[l.ID] = true
+ } else {
+ hnm = true
+ }
+ }
+ }
+
+ s := make([]*Sample, 0, len(p.Sample))
+ for _, sample := range p.Sample {
+ if focusedAndNotIgnored(sample.Location, focusOrIgnore) {
+ if len(hidden) > 0 {
+ var locs []*Location
+ for _, loc := range sample.Location {
+ if !hidden[loc.ID] {
+ locs = append(locs, loc)
+ }
+ }
+ if len(locs) == 0 {
+ // Remove sample with no locations (by not adding it to s).
+ continue
+ }
+ sample.Location = locs
+ }
+ s = append(s, sample)
+ }
+ }
+ p.Sample = s
+
+ return
+}
+
+// ShowFrom drops all stack frames above the highest matching frame and returns
+// whether a match was found. If showFrom is nil it returns false and does not
+// modify the profile.
+//
+// Example: consider a sample with frames [A, B, C, B], where A is the root.
+// ShowFrom(nil) returns false and has frames [A, B, C, B].
+// ShowFrom(A) returns true and has frames [A, B, C, B].
+// ShowFrom(B) returns true and has frames [B, C, B].
+// ShowFrom(C) returns true and has frames [C, B].
+// ShowFrom(D) returns false and drops the sample because no frames remain.
+func (p *Profile) ShowFrom(showFrom *regexp.Regexp) (matched bool) {
+ if showFrom == nil {
+ return false
+ }
+ // showFromLocs stores location IDs that matched ShowFrom.
+ showFromLocs := make(map[uint64]bool)
+ // Apply to locations.
+ for _, loc := range p.Location {
+ if filterShowFromLocation(loc, showFrom) {
+ showFromLocs[loc.ID] = true
+ matched = true
+ }
+ }
+ // For all samples, strip locations after the highest matching one.
+ s := make([]*Sample, 0, len(p.Sample))
+ for _, sample := range p.Sample {
+ for i := len(sample.Location) - 1; i >= 0; i-- {
+ if showFromLocs[sample.Location[i].ID] {
+ sample.Location = sample.Location[:i+1]
+ s = append(s, sample)
+ break
+ }
+ }
+ }
+ p.Sample = s
+ return matched
+}
+
+// filterShowFromLocation tests a showFrom regex against a location, removes
+// lines after the last match and returns whether a match was found. If the
+// mapping is matched, then all lines are kept.
+func filterShowFromLocation(loc *Location, showFrom *regexp.Regexp) bool {
+ if m := loc.Mapping; m != nil && showFrom.MatchString(m.File) {
+ return true
+ }
+ if i := loc.lastMatchedLineIndex(showFrom); i >= 0 {
+ loc.Line = loc.Line[:i+1]
+ return true
+ }
+ return false
+}
+
+// lastMatchedLineIndex returns the index of the last line that matches a regex,
+// or -1 if no match is found.
+func (loc *Location) lastMatchedLineIndex(re *regexp.Regexp) int {
+ for i := len(loc.Line) - 1; i >= 0; i-- {
+ if fn := loc.Line[i].Function; fn != nil {
+ if re.MatchString(fn.Name) || re.MatchString(fn.Filename) {
+ return i
+ }
+ }
+ }
+ return -1
+}
+
+// FilterTagsByName filters the tags in a profile and only keeps
+// tags that match show and not hide.
+func (p *Profile) FilterTagsByName(show, hide *regexp.Regexp) (sm, hm bool) {
+ matchRemove := func(name string) bool {
+ matchShow := show == nil || show.MatchString(name)
+ matchHide := hide != nil && hide.MatchString(name)
+
+ if matchShow {
+ sm = true
+ }
+ if matchHide {
+ hm = true
+ }
+ return !matchShow || matchHide
+ }
+ for _, s := range p.Sample {
+ for lab := range s.Label {
+ if matchRemove(lab) {
+ delete(s.Label, lab)
+ }
+ }
+ for lab := range s.NumLabel {
+ if matchRemove(lab) {
+ delete(s.NumLabel, lab)
+ }
+ }
+ }
+ return
+}
+
+// matchesName returns whether the location matches the regular
+// expression. It checks any available function names, file names, and
+// mapping object filename.
+func (loc *Location) matchesName(re *regexp.Regexp) bool {
+ for _, ln := range loc.Line {
+ if fn := ln.Function; fn != nil {
+ if re.MatchString(fn.Name) || re.MatchString(fn.Filename) {
+ return true
+ }
+ }
+ }
+ if m := loc.Mapping; m != nil && re.MatchString(m.File) {
+ return true
+ }
+ return false
+}
+
+// unmatchedLines returns the lines in the location that do not match
+// the regular expression.
+func (loc *Location) unmatchedLines(re *regexp.Regexp) []Line {
+ if m := loc.Mapping; m != nil && re.MatchString(m.File) {
+ return nil
+ }
+ var lines []Line
+ for _, ln := range loc.Line {
+ if fn := ln.Function; fn != nil {
+ if re.MatchString(fn.Name) || re.MatchString(fn.Filename) {
+ continue
+ }
+ }
+ lines = append(lines, ln)
+ }
+ return lines
+}
+
+// matchedLines returns the lines in the location that match
+// the regular expression.
+func (loc *Location) matchedLines(re *regexp.Regexp) []Line {
+ if m := loc.Mapping; m != nil && re.MatchString(m.File) {
+ return loc.Line
+ }
+ var lines []Line
+ for _, ln := range loc.Line {
+ if fn := ln.Function; fn != nil {
+ if !re.MatchString(fn.Name) && !re.MatchString(fn.Filename) {
+ continue
+ }
+ }
+ lines = append(lines, ln)
+ }
+ return lines
+}
+
+// focusedAndNotIgnored looks up a slice of ids against a map of
+// focused/ignored locations. The map only contains locations that are
+// explicitly focused or ignored. Returns whether there is at least
+// one focused location but no ignored locations.
+func focusedAndNotIgnored(locs []*Location, m map[uint64]bool) bool {
+ var f bool
+ for _, loc := range locs {
+ if focus, focusOrIgnore := m[loc.ID]; focusOrIgnore {
+ if focus {
+ // Found focused location. Must keep searching in case there
+ // is an ignored one as well.
+ f = true
+ } else {
+ // Found ignored location. Can return false right away.
+ return false
+ }
+ }
+ }
+ return f
+}
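+
+// For example, with m = {1: true, 2: false}, a sample whose location
+// IDs are [3, 1] is kept (one focused location, none ignored), while a
+// sample with IDs [1, 2] is dropped because location 2 is ignored.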
+
+// TagMatch selects tags for filtering
+type TagMatch func(s *Sample) bool
+
+// FilterSamplesByTag removes all samples from the profile, except
+// those for which focus returns true and ignore does not.
+func (p *Profile) FilterSamplesByTag(focus, ignore TagMatch) (fm, im bool) {
+ samples := make([]*Sample, 0, len(p.Sample))
+ for _, s := range p.Sample {
+ focused, ignored := true, false
+ if focus != nil {
+ focused = focus(s)
+ }
+ if ignore != nil {
+ ignored = ignore(s)
+ }
+ fm = fm || focused
+ im = im || ignored
+ if focused && !ignored {
+ samples = append(samples, s)
+ }
+ }
+ p.Sample = samples
+ return
+}
diff --git a/vendor/github.com/google/pprof/profile/index.go b/vendor/github.com/google/pprof/profile/index.go
new file mode 100644
index 000000000..bef1d6046
--- /dev/null
+++ b/vendor/github.com/google/pprof/profile/index.go
@@ -0,0 +1,64 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package profile
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// SampleIndexByName returns the index of the sample value selected by
+// sampleIndex. If sampleIndex is numeric, it returns that number;
+// otherwise it looks the text up among the profile's sample types.
+func (p *Profile) SampleIndexByName(sampleIndex string) (int, error) {
+ if sampleIndex == "" {
+ if dst := p.DefaultSampleType; dst != "" {
+ for i, t := range sampleTypes(p) {
+ if t == dst {
+ return i, nil
+ }
+ }
+ }
+ // By default select the last sample value
+ return len(p.SampleType) - 1, nil
+ }
+ if i, err := strconv.Atoi(sampleIndex); err == nil {
+ if i < 0 || i >= len(p.SampleType) {
+ return 0, fmt.Errorf("sample_index %s is outside the range [0..%d]", sampleIndex, len(p.SampleType)-1)
+ }
+ return i, nil
+ }
+
+ // Remove the inuse_ prefix to support legacy pprof options
+ // "inuse_space" and "inuse_objects" for profiles containing types
+ // "space" and "objects".
+ noInuse := strings.TrimPrefix(sampleIndex, "inuse_")
+ for i, t := range p.SampleType {
+ if t.Type == sampleIndex || t.Type == noInuse {
+ return i, nil
+ }
+ }
+
+ return 0, fmt.Errorf("sample_index %q must be one of: %v", sampleIndex, sampleTypes(p))
+}
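+
+// A sketch of the lookup rules, assuming a legacy heap profile whose
+// sample types are [objects, space] (errors elided):
+//
+//	p.SampleIndexByName("1")           // 1: numeric index
+//	p.SampleIndexByName("space")       // 1: exact type match
+//	p.SampleIndexByName("inuse_space") // 1: legacy inuse_ prefix stripped
+//	p.SampleIndexByName("")            // 1: defaults to the last value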
+
+func sampleTypes(p *Profile) []string {
+ types := make([]string, len(p.SampleType))
+ for i, t := range p.SampleType {
+ types[i] = t.Type
+ }
+ return types
+}
diff --git a/vendor/github.com/google/pprof/profile/legacy_java_profile.go b/vendor/github.com/google/pprof/profile/legacy_java_profile.go
new file mode 100644
index 000000000..4580bab18
--- /dev/null
+++ b/vendor/github.com/google/pprof/profile/legacy_java_profile.go
@@ -0,0 +1,315 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file implements parsers to convert java legacy profiles into
+// the profile.proto format.
+
+package profile
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "path/filepath"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+var (
+ attributeRx = regexp.MustCompile(`([\w ]+)=([\w ]+)`)
+ javaSampleRx = regexp.MustCompile(` *(\d+) +(\d+) +@ +([ x0-9a-f]*)`)
+ javaLocationRx = regexp.MustCompile(`^\s*0x([[:xdigit:]]+)\s+(.*)\s*$`)
+ javaLocationFileLineRx = regexp.MustCompile(`^(.*)\s+\((.+):(-?[[:digit:]]+)\)$`)
+ javaLocationPathRx = regexp.MustCompile(`^(.*)\s+\((.*)\)$`)
+)
+
+// javaCPUProfile returns a new Profile from profilez data.
+// b is the profile bytes after the header, period is the profiling
+// period, and parse is a function to parse 8-byte chunks from the
+// profile in its native endianness.
+func javaCPUProfile(b []byte, period int64, parse func(b []byte) (uint64, []byte)) (*Profile, error) {
+ p := &Profile{
+ Period: period * 1000,
+ PeriodType: &ValueType{Type: "cpu", Unit: "nanoseconds"},
+ SampleType: []*ValueType{{Type: "samples", Unit: "count"}, {Type: "cpu", Unit: "nanoseconds"}},
+ }
+ var err error
+ var locs map[uint64]*Location
+ if b, locs, err = parseCPUSamples(b, parse, false, p); err != nil {
+ return nil, err
+ }
+
+ if err = parseJavaLocations(b, locs, p); err != nil {
+ return nil, err
+ }
+
+ // Strip out addresses for better merge.
+ if err = p.Aggregate(true, true, true, true, false, false); err != nil {
+ return nil, err
+ }
+
+ return p, nil
+}
+
+// parseJavaProfile returns a new profile from heapz or contentionz
+// data. b is the profile bytes after the header.
+func parseJavaProfile(b []byte) (*Profile, error) {
+ h := bytes.SplitAfterN(b, []byte("\n"), 2)
+ if len(h) < 2 {
+ return nil, errUnrecognized
+ }
+
+ p := &Profile{
+ PeriodType: &ValueType{},
+ }
+ header := string(bytes.TrimSpace(h[0]))
+
+ var err error
+ var pType string
+ switch header {
+ case "--- heapz 1 ---":
+ pType = "heap"
+ case "--- contentionz 1 ---":
+ pType = "contention"
+ default:
+ return nil, errUnrecognized
+ }
+
+ if b, err = parseJavaHeader(pType, h[1], p); err != nil {
+ return nil, err
+ }
+ var locs map[uint64]*Location
+ if b, locs, err = parseJavaSamples(pType, b, p); err != nil {
+ return nil, err
+ }
+ if err = parseJavaLocations(b, locs, p); err != nil {
+ return nil, err
+ }
+
+ // Strip out addresses for better merge.
+ if err = p.Aggregate(true, true, true, true, false, false); err != nil {
+ return nil, err
+ }
+
+ return p, nil
+}
+
+// parseJavaHeader parses the attribute section on a java profile and
+// populates a profile. Returns the remainder of the buffer after all
+// attributes.
+func parseJavaHeader(pType string, b []byte, p *Profile) ([]byte, error) {
+ nextNewLine := bytes.IndexByte(b, byte('\n'))
+ for nextNewLine != -1 {
+ line := string(bytes.TrimSpace(b[0:nextNewLine]))
+ if line != "" {
+ h := attributeRx.FindStringSubmatch(line)
+ if h == nil {
+ // Not a valid attribute, exit.
+ return b, nil
+ }
+
+ attribute, value := strings.TrimSpace(h[1]), strings.TrimSpace(h[2])
+ var err error
+ switch pType + "/" + attribute {
+ case "heap/format", "cpu/format", "contention/format":
+ if value != "java" {
+ return nil, errUnrecognized
+ }
+ case "heap/resolution":
+ p.SampleType = []*ValueType{
+ {Type: "inuse_objects", Unit: "count"},
+ {Type: "inuse_space", Unit: value},
+ }
+ case "contention/resolution":
+ p.SampleType = []*ValueType{
+ {Type: "contentions", Unit: "count"},
+ {Type: "delay", Unit: value},
+ }
+ case "contention/sampling period":
+ p.PeriodType = &ValueType{
+ Type: "contentions", Unit: "count",
+ }
+ if p.Period, err = strconv.ParseInt(value, 0, 64); err != nil {
+ return nil, fmt.Errorf("failed to parse attribute %s: %v", line, err)
+ }
+ case "contention/ms since reset":
+ millis, err := strconv.ParseInt(value, 0, 64)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse attribute %s: %v", line, err)
+ }
+ p.DurationNanos = millis * 1000 * 1000
+ default:
+ return nil, errUnrecognized
+ }
+ }
+ // Grab next line.
+ b = b[nextNewLine+1:]
+ nextNewLine = bytes.IndexByte(b, byte('\n'))
+ }
+ return b, nil
+}
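+
+// The attribute section handled above consists of "name=value" lines;
+// for a heapz profile these could look like:
+//
+//	format=java
+//	resolution=bytes
+//
+// which select the java format and set the inuse_space unit to "bytes".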
+
+// parseJavaSamples parses the samples from a java profile and
+// populates the Samples in a profile. Returns the remainder of the
+// buffer after the samples.
+func parseJavaSamples(pType string, b []byte, p *Profile) ([]byte, map[uint64]*Location, error) {
+ nextNewLine := bytes.IndexByte(b, byte('\n'))
+ locs := make(map[uint64]*Location)
+ for nextNewLine != -1 {
+ line := string(bytes.TrimSpace(b[0:nextNewLine]))
+ if line != "" {
+ sample := javaSampleRx.FindStringSubmatch(line)
+ if sample == nil {
+ // Not a valid sample, exit.
+ return b, locs, nil
+ }
+
+ // Java profiles have data/fields inverted compared to other
+ // profile types.
+ var err error
+ value1, value2, value3 := sample[2], sample[1], sample[3]
+ addrs, err := parseHexAddresses(value3)
+ if err != nil {
+ return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err)
+ }
+
+ var sloc []*Location
+ for _, addr := range addrs {
+ loc := locs[addr]
+ if locs[addr] == nil {
+ loc = &Location{
+ Address: addr,
+ }
+ p.Location = append(p.Location, loc)
+ locs[addr] = loc
+ }
+ sloc = append(sloc, loc)
+ }
+ s := &Sample{
+ Value: make([]int64, 2),
+ Location: sloc,
+ }
+
+ if s.Value[0], err = strconv.ParseInt(value1, 0, 64); err != nil {
+ return nil, nil, fmt.Errorf("parsing sample %s: %v", line, err)
+ }
+ if s.Value[1], err = strconv.ParseInt(value2, 0, 64); err != nil {
+ return nil, nil, fmt.Errorf("parsing sample %s: %v", line, err)
+ }
+
+ switch pType {
+ case "heap":
+ const javaHeapzSamplingRate = 524288 // 512K
+ if s.Value[0] == 0 {
+ return nil, nil, fmt.Errorf("parsing sample %s: second value must be non-zero", line)
+ }
+ s.NumLabel = map[string][]int64{"bytes": {s.Value[1] / s.Value[0]}}
+ s.Value[0], s.Value[1] = scaleHeapSample(s.Value[0], s.Value[1], javaHeapzSamplingRate)
+ case "contention":
+ if period := p.Period; period != 0 {
+ s.Value[0] = s.Value[0] * p.Period
+ s.Value[1] = s.Value[1] * p.Period
+ }
+ }
+ p.Sample = append(p.Sample, s)
+ }
+ // Grab next line.
+ b = b[nextNewLine+1:]
+ nextNewLine = bytes.IndexByte(b, byte('\n'))
+ }
+ return b, locs, nil
+}
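+
+// As an illustration (addresses made up), the inverted heapz line
+//
+//	4096 2 @ 0x1001
+//
+// parses into Value[0]=2 objects and Value[1]=4096 bytes, with a
+// "bytes" label of 4096/2 = 2048 per object, before sampling-rate
+// scaling is applied.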
+
+// parseJavaLocations parses the location information in a java
+// profile and populates the Locations in a profile. It uses the
+// location addresses from the profile to identify the corresponding
+// locations.
+func parseJavaLocations(b []byte, locs map[uint64]*Location, p *Profile) error {
+ r := bytes.NewBuffer(b)
+ fns := make(map[string]*Function)
+ for {
+ line, err := r.ReadString('\n')
+ if err != nil {
+ if err != io.EOF {
+ return err
+ }
+ if line == "" {
+ break
+ }
+ }
+
+ if line = strings.TrimSpace(line); line == "" {
+ continue
+ }
+
+ jloc := javaLocationRx.FindStringSubmatch(line)
+ if len(jloc) != 3 {
+ continue
+ }
+ addr, err := strconv.ParseUint(jloc[1], 16, 64)
+ if err != nil {
+ return fmt.Errorf("parsing sample %s: %v", line, err)
+ }
+ loc := locs[addr]
+ if loc == nil {
+ // Unused/unseen
+ continue
+ }
+ var lineFunc, lineFile string
+ var lineNo int64
+
+ if fileLine := javaLocationFileLineRx.FindStringSubmatch(jloc[2]); len(fileLine) == 4 {
+ // Found a line of the form: "function (file:line)"
+ lineFunc, lineFile = fileLine[1], fileLine[2]
+ if n, err := strconv.ParseInt(fileLine[3], 10, 64); err == nil && n > 0 {
+ lineNo = n
+ }
+ } else if filePath := javaLocationPathRx.FindStringSubmatch(jloc[2]); len(filePath) == 3 {
+ // If there's not a file:line, it's a shared library path.
+ // The path isn't interesting, so just give the .so.
+ lineFunc, lineFile = filePath[1], filepath.Base(filePath[2])
+ } else if strings.Contains(jloc[2], "generated stub/JIT") {
+ lineFunc = "STUB"
+ } else {
+ // Treat whole line as the function name. This is used by the
+ // java agent for internal states such as "GC" or "VM".
+ lineFunc = jloc[2]
+ }
+ fn := fns[lineFunc]
+
+ if fn == nil {
+ fn = &Function{
+ Name: lineFunc,
+ SystemName: lineFunc,
+ Filename: lineFile,
+ }
+ fns[lineFunc] = fn
+ p.Function = append(p.Function, fn)
+ }
+ loc.Line = []Line{
+ {
+ Function: fn,
+ Line: lineNo,
+ },
+ }
+ loc.Address = 0
+ }
+
+ p.remapLocationIDs()
+ p.remapFunctionIDs()
+ p.remapMappingIDs()
+
+ return nil
+}
diff --git a/vendor/github.com/google/pprof/profile/legacy_profile.go b/vendor/github.com/google/pprof/profile/legacy_profile.go
new file mode 100644
index 000000000..8d07fd6c2
--- /dev/null
+++ b/vendor/github.com/google/pprof/profile/legacy_profile.go
@@ -0,0 +1,1228 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file implements parsers to convert legacy profiles into the
+// profile.proto format.
+
+package profile
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "math"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+var (
+ countStartRE = regexp.MustCompile(`\A(\S+) profile: total \d+\z`)
+ countRE = regexp.MustCompile(`\A(\d+) @(( 0x[0-9a-f]+)+)\z`)
+
+ heapHeaderRE = regexp.MustCompile(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] *@ *(heap[_a-z0-9]*)/?(\d*)`)
+ heapSampleRE = regexp.MustCompile(`(-?\d+): *(-?\d+) *\[ *(\d+): *(\d+) *] @([ x0-9a-f]*)`)
+
+ contentionSampleRE = regexp.MustCompile(`(\d+) *(\d+) @([ x0-9a-f]*)`)
+
+ hexNumberRE = regexp.MustCompile(`0x[0-9a-f]+`)
+
+ growthHeaderRE = regexp.MustCompile(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] @ growthz?`)
+
+ fragmentationHeaderRE = regexp.MustCompile(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] @ fragmentationz?`)
+
+ threadzStartRE = regexp.MustCompile(`--- threadz \d+ ---`)
+ threadStartRE = regexp.MustCompile(`--- Thread ([[:xdigit:]]+) \(name: (.*)/(\d+)\) stack: ---`)
+
+ // Regular expressions to parse process mappings. Support the format used by Linux /proc/.../maps and other tools.
+ // Recommended format:
+ // Start End object file name offset(optional) linker build id
+ // 0x40000-0x80000 /path/to/binary (@FF00) abc123456
+ spaceDigits = `\s+[[:digit:]]+`
+ hexPair = `\s+[[:xdigit:]]+:[[:xdigit:]]+`
+ oSpace = `\s*`
+ // Capturing expressions.
+ cHex = `(?:0x)?([[:xdigit:]]+)`
+ cHexRange = `\s*` + cHex + `[\s-]?` + oSpace + cHex + `:?`
+ cSpaceString = `(?:\s+(\S+))?`
+ cSpaceHex = `(?:\s+([[:xdigit:]]+))?`
+ cSpaceAtOffset = `(?:\s+\(@([[:xdigit:]]+)\))?`
+ cPerm = `(?:\s+([-rwxp]+))?`
+
+ procMapsRE = regexp.MustCompile(`^` + cHexRange + cPerm + cSpaceHex + hexPair + spaceDigits + cSpaceString)
+ briefMapsRE = regexp.MustCompile(`^` + cHexRange + cPerm + cSpaceString + cSpaceAtOffset + cSpaceHex)
+
+ // Regular expression to parse log data, of the form:
+ // ... file:line] msg...
+ logInfoRE = regexp.MustCompile(`^[^\[\]]+:[0-9]+]\s`)
+)
+
+func isSpaceOrComment(line string) bool {
+ trimmed := strings.TrimSpace(line)
+ return len(trimmed) == 0 || trimmed[0] == '#'
+}
+
+// parseGoCount parses a Go count profile (e.g., threadcreate or
+// goroutine) and returns a new Profile.
+func parseGoCount(b []byte) (*Profile, error) {
+ s := bufio.NewScanner(bytes.NewBuffer(b))
+ // Skip comments at the beginning of the file.
+ for s.Scan() && isSpaceOrComment(s.Text()) {
+ }
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+ m := countStartRE.FindStringSubmatch(s.Text())
+ if m == nil {
+ return nil, errUnrecognized
+ }
+ profileType := m[1]
+ p := &Profile{
+ PeriodType: &ValueType{Type: profileType, Unit: "count"},
+ Period: 1,
+ SampleType: []*ValueType{{Type: profileType, Unit: "count"}},
+ }
+ locations := make(map[uint64]*Location)
+ for s.Scan() {
+ line := s.Text()
+ if isSpaceOrComment(line) {
+ continue
+ }
+ if strings.HasPrefix(line, "---") {
+ break
+ }
+ m := countRE.FindStringSubmatch(line)
+ if m == nil {
+ return nil, errMalformed
+ }
+ n, err := strconv.ParseInt(m[1], 0, 64)
+ if err != nil {
+ return nil, errMalformed
+ }
+ fields := strings.Fields(m[2])
+ locs := make([]*Location, 0, len(fields))
+ for _, stk := range fields {
+ addr, err := strconv.ParseUint(stk, 0, 64)
+ if err != nil {
+ return nil, errMalformed
+ }
+ // Adjust all frames by -1 to land on top of the call instruction.
+ addr--
+ loc := locations[addr]
+ if loc == nil {
+ loc = &Location{
+ Address: addr,
+ }
+ locations[addr] = loc
+ p.Location = append(p.Location, loc)
+ }
+ locs = append(locs, loc)
+ }
+ p.Sample = append(p.Sample, &Sample{
+ Location: locs,
+ Value: []int64{n},
+ })
+ }
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+
+ if err := parseAdditionalSections(s, p); err != nil {
+ return nil, err
+ }
+ return p, nil
+}
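+
+// For reference, the count-profile text parsed above looks roughly
+// like this (addresses are illustrative):
+//
+//	goroutine profile: total 16
+//	15 @ 0x4a8f01 0x4a2bc3 0x45d5a1
+//	1 @ 0x4350f5 0x4a8d2a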
+
+// remapLocationIDs ensures there is a location for each address
+// referenced by a sample, and remaps the samples to point to the new
+// location ids.
+func (p *Profile) remapLocationIDs() {
+ seen := make(map[*Location]bool, len(p.Location))
+ var locs []*Location
+
+ for _, s := range p.Sample {
+ for _, l := range s.Location {
+ if seen[l] {
+ continue
+ }
+ l.ID = uint64(len(locs) + 1)
+ locs = append(locs, l)
+ seen[l] = true
+ }
+ }
+ p.Location = locs
+}
+
+func (p *Profile) remapFunctionIDs() {
+ seen := make(map[*Function]bool, len(p.Function))
+ var fns []*Function
+
+ for _, l := range p.Location {
+ for _, ln := range l.Line {
+ fn := ln.Function
+ if fn == nil || seen[fn] {
+ continue
+ }
+ fn.ID = uint64(len(fns) + 1)
+ fns = append(fns, fn)
+ seen[fn] = true
+ }
+ }
+ p.Function = fns
+}
+
+// remapMappingIDs matches location addresses with existing mappings
+// and updates them appropriately. This is O(N*M), if this ever shows
+// up as a bottleneck, evaluate sorting the mappings and doing a
+// binary search, which would make it O(N*log(M)).
+func (p *Profile) remapMappingIDs() {
+ // Some profile handlers will incorrectly set regions for the main
+ // executable if its section is remapped. Fix them through heuristics.
+
+ if len(p.Mapping) > 0 {
+ // Remove the initial mapping if named '/anon_hugepage' and has a
+ // consecutive adjacent mapping.
+ if m := p.Mapping[0]; strings.HasPrefix(m.File, "/anon_hugepage") {
+ if len(p.Mapping) > 1 && m.Limit == p.Mapping[1].Start {
+ p.Mapping = p.Mapping[1:]
+ }
+ }
+ }
+
+ // Subtract the offset from the start of the main mapping if it
+ // ends up at a recognizable start address.
+ if len(p.Mapping) > 0 {
+ const expectedStart = 0x400000
+ if m := p.Mapping[0]; m.Start-m.Offset == expectedStart {
+ m.Start = expectedStart
+ m.Offset = 0
+ }
+ }
+
+ // Associate each location with an address to the corresponding
+ // mapping. Create fake mapping if a suitable one isn't found.
+ var fake *Mapping
+nextLocation:
+ for _, l := range p.Location {
+ a := l.Address
+ if l.Mapping != nil || a == 0 {
+ continue
+ }
+ for _, m := range p.Mapping {
+ if m.Start <= a && a < m.Limit {
+ l.Mapping = m
+ continue nextLocation
+ }
+ }
+ // Work around legacy handlers failing to encode the first
+ // part of mappings split into adjacent ranges.
+ for _, m := range p.Mapping {
+ if m.Offset != 0 && m.Start-m.Offset <= a && a < m.Start {
+ m.Start -= m.Offset
+ m.Offset = 0
+ l.Mapping = m
+ continue nextLocation
+ }
+ }
+ // If there is still no mapping, create a fake one.
+ // This is important for the Go legacy handler, which produced
+ // no mappings.
+ if fake == nil {
+ fake = &Mapping{
+ ID: 1,
+ Limit: ^uint64(0),
+ }
+ p.Mapping = append(p.Mapping, fake)
+ }
+ l.Mapping = fake
+ }
+
+ // Reset all mapping IDs.
+ for i, m := range p.Mapping {
+ m.ID = uint64(i + 1)
+ }
+}
+
+var cpuInts = []func([]byte) (uint64, []byte){
+ get32l,
+ get32b,
+ get64l,
+ get64b,
+}
+
+func get32l(b []byte) (uint64, []byte) {
+ if len(b) < 4 {
+ return 0, nil
+ }
+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24, b[4:]
+}
+
+func get32b(b []byte) (uint64, []byte) {
+ if len(b) < 4 {
+ return 0, nil
+ }
+ return uint64(b[3]) | uint64(b[2])<<8 | uint64(b[1])<<16 | uint64(b[0])<<24, b[4:]
+}
+
+func get64l(b []byte) (uint64, []byte) {
+ if len(b) < 8 {
+ return 0, nil
+ }
+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56, b[8:]
+}
+
+func get64b(b []byte) (uint64, []byte) {
+ if len(b) < 8 {
+ return 0, nil
+ }
+ return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 | uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56, b[8:]
+}
+
+// parseCPU parses a profilez legacy profile and returns a newly
+// populated Profile.
+//
+// The general format for profilez samples is a sequence of words in
+// binary format. The first words are a header with the following data:
+//
+// 1st word -- 0
+// 2nd word -- 3
+// 3rd word -- 0 if a c++ application, 1 if a java application.
+// 4th word -- Sampling period (in microseconds).
+// 5th word -- Padding.
+func parseCPU(b []byte) (*Profile, error) {
+ var parse func([]byte) (uint64, []byte)
+ var n1, n2, n3, n4, n5 uint64
+ for _, parse = range cpuInts {
+ var tmp []byte
+ n1, tmp = parse(b)
+ n2, tmp = parse(tmp)
+ n3, tmp = parse(tmp)
+ n4, tmp = parse(tmp)
+ n5, tmp = parse(tmp)
+
+ if tmp != nil && n1 == 0 && n2 == 3 && n3 == 0 && n4 > 0 && n5 == 0 {
+ b = tmp
+ return cpuProfile(b, int64(n4), parse)
+ }
+ if tmp != nil && n1 == 0 && n2 == 3 && n3 == 1 && n4 > 0 && n5 == 0 {
+ b = tmp
+ return javaCPUProfile(b, int64(n4), parse)
+ }
+ }
+ return nil, errUnrecognized
+}
+
+// cpuProfile returns a new Profile from C++ profilez data.
+// b is the profile bytes after the header, period is the profiling
+// period, and parse is a function to parse 8-byte chunks from the
+// profile in its native endianness.
+func cpuProfile(b []byte, period int64, parse func(b []byte) (uint64, []byte)) (*Profile, error) {
+ p := &Profile{
+ Period: period * 1000,
+ PeriodType: &ValueType{Type: "cpu", Unit: "nanoseconds"},
+ SampleType: []*ValueType{
+ {Type: "samples", Unit: "count"},
+ {Type: "cpu", Unit: "nanoseconds"},
+ },
+ }
+ var err error
+ if b, _, err = parseCPUSamples(b, parse, true, p); err != nil {
+ return nil, err
+ }
+
+ // If *most* samples have the same second-to-the-bottom frame, it
+ // strongly suggests that it is an uninteresting artifact of
+ // measurement -- a stack frame pushed by the signal handler. The
+ // bottom frame is always correct as it is picked up from the signal
+ // structure, not the stack. Check if this is the case and if so,
+ // remove.
+
+ // Remove up to two frames.
+ maxiter := 2
+ // Allow one different sample for this many samples with the same
+ // second-to-last frame.
+ similarSamples := 32
+ margin := len(p.Sample) / similarSamples
+
+ for iter := 0; iter < maxiter; iter++ {
+ addr1 := make(map[uint64]int)
+ for _, s := range p.Sample {
+ if len(s.Location) > 1 {
+ a := s.Location[1].Address
+ addr1[a] = addr1[a] + 1
+ }
+ }
+
+ for id1, count := range addr1 {
+ if count >= len(p.Sample)-margin {
+ // Found uninteresting frame, strip it out from all samples
+ for _, s := range p.Sample {
+ if len(s.Location) > 1 && s.Location[1].Address == id1 {
+ s.Location = append(s.Location[:1], s.Location[2:]...)
+ }
+ }
+ break
+ }
+ }
+ }
+
+ if err := p.ParseMemoryMap(bytes.NewBuffer(b)); err != nil {
+ return nil, err
+ }
+
+ cleanupDuplicateLocations(p)
+ return p, nil
+}
+
+func cleanupDuplicateLocations(p *Profile) {
+ // The profile handler may duplicate the leaf frame, because it gets
+ // its address both from stack unwinding and from the signal
+ // context. Detect this and delete the duplicate, which has been
+ // adjusted by -1. The leaf address should not be adjusted as it is
+ // not a call.
+ for _, s := range p.Sample {
+ if len(s.Location) > 1 && s.Location[0].Address == s.Location[1].Address+1 {
+ s.Location = append(s.Location[:1], s.Location[2:]...)
+ }
+ }
+}
+
+// parseCPUSamples parses a collection of profilez samples from a
+// profile.
+//
+// profilez samples are a repeated sequence of stack frames of the
+// form:
+//
+// 1st word -- The number of times this stack was encountered.
+// 2nd word -- The size of the stack (StackSize).
+// 3rd word -- The first address on the stack.
+// ...
+// StackSize + 2 -- The last address on the stack
+//
+// The last stack trace is of the form:
+//
+// 1st word -- 0
+// 2nd word -- 1
+// 3rd word -- 0
+//
+// Addresses from stack traces may point to the next instruction after
+// each call. Optionally adjust by -1 to land somewhere on the actual
+// call (except for the leaf, which is not a call).
+func parseCPUSamples(b []byte, parse func(b []byte) (uint64, []byte), adjust bool, p *Profile) ([]byte, map[uint64]*Location, error) {
+ locs := make(map[uint64]*Location)
+ for len(b) > 0 {
+ var count, nstk uint64
+ count, b = parse(b)
+ nstk, b = parse(b)
+ if b == nil || nstk > uint64(len(b)/4) {
+ return nil, nil, errUnrecognized
+ }
+ var sloc []*Location
+ addrs := make([]uint64, nstk)
+ for i := 0; i < int(nstk); i++ {
+ addrs[i], b = parse(b)
+ }
+
+ if count == 0 && nstk == 1 && addrs[0] == 0 {
+ // End of data marker
+ break
+ }
+ for i, addr := range addrs {
+ if adjust && i > 0 {
+ addr--
+ }
+ loc := locs[addr]
+ if loc == nil {
+ loc = &Location{
+ Address: addr,
+ }
+ locs[addr] = loc
+ p.Location = append(p.Location, loc)
+ }
+ sloc = append(sloc, loc)
+ }
+ p.Sample = append(p.Sample,
+ &Sample{
+ Value: []int64{int64(count), int64(count) * p.Period},
+ Location: sloc,
+ })
+ }
+ // Reached the end without finding the EOD marker.
+ return b, locs, nil
+}
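+
+// As a concrete illustration, a single stack [0x1001, 0x2002] observed
+// 3 times, followed by the end-of-data marker, is the word sequence:
+//
+//	3 2 0x1001 0x2002   // count, stack size, addresses
+//	0 1 0               // EOD marker: count=0, nstk=1, addr=0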
+
+// parseHeap parses a heapz legacy or a growthz profile and
+// returns a newly populated Profile.
+func parseHeap(b []byte) (p *Profile, err error) {
+ s := bufio.NewScanner(bytes.NewBuffer(b))
+ if !s.Scan() {
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+ return nil, errUnrecognized
+ }
+ p = &Profile{}
+
+ sampling := ""
+ hasAlloc := false
+
+ line := s.Text()
+ p.PeriodType = &ValueType{Type: "space", Unit: "bytes"}
+ if header := heapHeaderRE.FindStringSubmatch(line); header != nil {
+ sampling, p.Period, hasAlloc, err = parseHeapHeader(line)
+ if err != nil {
+ return nil, err
+ }
+ } else if header = growthHeaderRE.FindStringSubmatch(line); header != nil {
+ p.Period = 1
+ } else if header = fragmentationHeaderRE.FindStringSubmatch(line); header != nil {
+ p.Period = 1
+ } else {
+ return nil, errUnrecognized
+ }
+
+ if hasAlloc {
+ // Put alloc before inuse so that default pprof selection
+ // will prefer inuse_space.
+ p.SampleType = []*ValueType{
+ {Type: "alloc_objects", Unit: "count"},
+ {Type: "alloc_space", Unit: "bytes"},
+ {Type: "inuse_objects", Unit: "count"},
+ {Type: "inuse_space", Unit: "bytes"},
+ }
+ } else {
+ p.SampleType = []*ValueType{
+ {Type: "objects", Unit: "count"},
+ {Type: "space", Unit: "bytes"},
+ }
+ }
+
+ locs := make(map[uint64]*Location)
+ for s.Scan() {
+ line := strings.TrimSpace(s.Text())
+
+ if isSpaceOrComment(line) {
+ continue
+ }
+
+ if isMemoryMapSentinel(line) {
+ break
+ }
+
+ value, blocksize, addrs, err := parseHeapSample(line, p.Period, sampling, hasAlloc)
+ if err != nil {
+ return nil, err
+ }
+
+ var sloc []*Location
+ for _, addr := range addrs {
+ // Addresses from stack traces point to the next instruction after
+ // each call. Adjust by -1 to land somewhere on the actual call.
+ addr--
+ loc := locs[addr]
+ if locs[addr] == nil {
+ loc = &Location{
+ Address: addr,
+ }
+ p.Location = append(p.Location, loc)
+ locs[addr] = loc
+ }
+ sloc = append(sloc, loc)
+ }
+
+ p.Sample = append(p.Sample, &Sample{
+ Value: value,
+ Location: sloc,
+ NumLabel: map[string][]int64{"bytes": {blocksize}},
+ })
+ }
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+ if err := parseAdditionalSections(s, p); err != nil {
+ return nil, err
+ }
+ return p, nil
+}
+
+func parseHeapHeader(line string) (sampling string, period int64, hasAlloc bool, err error) {
+ header := heapHeaderRE.FindStringSubmatch(line)
+ if header == nil {
+ return "", 0, false, errUnrecognized
+ }
+
+ if len(header[6]) > 0 {
+ if period, err = strconv.ParseInt(header[6], 10, 64); err != nil {
+ return "", 0, false, errUnrecognized
+ }
+ }
+
+ if (header[3] != header[1] && header[3] != "0") || (header[4] != header[2] && header[4] != "0") {
+ hasAlloc = true
+ }
+
+ switch header[5] {
+ case "heapz_v2", "heap_v2":
+ return "v2", period, hasAlloc, nil
+ case "heapprofile":
+ return "", 1, hasAlloc, nil
+ case "heap":
+ return "v2", period / 2, hasAlloc, nil
+ default:
+ return "", 0, false, errUnrecognized
+ }
+}
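+
+// For example, a v2 header such as
+//
+//	heap profile: 1: 8 [2: 16] @ heap_v2/524288
+//
+// yields sampling "v2", period 524288, and hasAlloc=true, since the
+// in-use counts differ from the allocation counts.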
+
+// parseHeapSample parses a single row from a heap profile into a new Sample.
+func parseHeapSample(line string, rate int64, sampling string, includeAlloc bool) (value []int64, blocksize int64, addrs []uint64, err error) {
+ sampleData := heapSampleRE.FindStringSubmatch(line)
+ if len(sampleData) != 6 {
+ return nil, 0, nil, fmt.Errorf("unexpected number of sample values: got %d, want 6", len(sampleData))
+ }
+
+ // This is a local-scoped helper function to avoid needing to pass
+ // around rate, sampling and many return parameters.
+ addValues := func(countString, sizeString string, label string) error {
+ count, err := strconv.ParseInt(countString, 10, 64)
+ if err != nil {
+ return fmt.Errorf("malformed sample: %s: %v", line, err)
+ }
+ size, err := strconv.ParseInt(sizeString, 10, 64)
+ if err != nil {
+ return fmt.Errorf("malformed sample: %s: %v", line, err)
+ }
+ if count == 0 && size != 0 {
+ return fmt.Errorf("%s count was 0 but %s bytes was %d", label, label, size)
+ }
+ if count != 0 {
+ blocksize = size / count
+ if sampling == "v2" {
+ count, size = scaleHeapSample(count, size, rate)
+ }
+ }
+ value = append(value, count, size)
+ return nil
+ }
+
+ if includeAlloc {
+ if err := addValues(sampleData[3], sampleData[4], "allocation"); err != nil {
+ return nil, 0, nil, err
+ }
+ }
+
+ if err := addValues(sampleData[1], sampleData[2], "inuse"); err != nil {
+ return nil, 0, nil, err
+ }
+
+ addrs, err = parseHexAddresses(sampleData[5])
+ if err != nil {
+ return nil, 0, nil, fmt.Errorf("malformed sample: %s: %v", line, err)
+ }
+
+ return value, blocksize, addrs, nil
+}
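+
+// As an illustration (addresses made up), the sample line
+//
+//	1: 8 [2: 16] @ 0x1001 0x2002
+//
+// carries in-use count 1 / 8 bytes and allocated count 2 / 16 bytes;
+// with includeAlloc set, the returned values are ordered
+// [alloc_objects, alloc_space, inuse_objects, inuse_space], scaled by
+// the sampling rate for v2 profiles.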
+
+// parseHexAddresses extracts hex numbers from a string, attempts to convert
+// each to an unsigned 64-bit number and returns the resulting numbers as a
+// slice, or an error if the string contains hex numbers which are too large to
+// handle (which means a malformed profile).
+func parseHexAddresses(s string) ([]uint64, error) {
+ hexStrings := hexNumberRE.FindAllString(s, -1)
+ var addrs []uint64
+ for _, s := range hexStrings {
+ if addr, err := strconv.ParseUint(s, 0, 64); err == nil {
+ addrs = append(addrs, addr)
+ } else {
+ return nil, fmt.Errorf("failed to parse as hex 64-bit number: %s", s)
+ }
+ }
+ return addrs, nil
+}
+
+// scaleHeapSample adjusts the data from a heapz Sample to
+// account for its probability of appearing in the collected
+// data. heapz profiles are a sampling of the memory allocation
+// requests in a program. We estimate the unsampled value by dividing
+// each collected sample by its probability of appearing in the
+// profile. heapz v2 profiles rely on a Poisson process to determine
+// which samples to collect, based on the desired average collection
+// rate R. The probability that a sample of size S appears in that
+// profile is 1-exp(-S/R).
+func scaleHeapSample(count, size, rate int64) (int64, int64) {
+ if count == 0 || size == 0 {
+ return 0, 0
+ }
+
+ if rate <= 1 {
+ // if rate==1 all samples were collected so no adjustment is needed.
+ // if rate<1 treat as unknown and skip scaling.
+ return count, size
+ }
+
+ avgSize := float64(size) / float64(count)
+ scale := 1 / (1 - math.Exp(-avgSize/float64(rate)))
+
+ return int64(float64(count) * scale), int64(float64(size) * scale)
+}
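+
+// A worked example, assuming a sampling rate R = 524288 (512KiB): a
+// collected sample with count=1 and size=1048576 has average size
+// S = 1048576, so scale = 1/(1-exp(-S/R)) = 1/(1-exp(-2)) ≈ 1.157 and
+// the unsampled estimates are count ≈ 1 and size ≈ 1.21e6 bytes.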
+
+// parseContention parses a mutex or contention profile. There are 2 cases:
+// "--- contentionz " for legacy C++ profiles (and backwards compatibility)
+// "--- mutex:" or "--- contention:" for profiles generated by the Go runtime.
+func parseContention(b []byte) (*Profile, error) {
+ s := bufio.NewScanner(bytes.NewBuffer(b))
+ if !s.Scan() {
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+ return nil, errUnrecognized
+ }
+
+ switch l := s.Text(); {
+ case strings.HasPrefix(l, "--- contentionz "):
+ case strings.HasPrefix(l, "--- mutex:"):
+ case strings.HasPrefix(l, "--- contention:"):
+ default:
+ return nil, errUnrecognized
+ }
+
+ p := &Profile{
+ PeriodType: &ValueType{Type: "contentions", Unit: "count"},
+ Period: 1,
+ SampleType: []*ValueType{
+ {Type: "contentions", Unit: "count"},
+ {Type: "delay", Unit: "nanoseconds"},
+ },
+ }
+
+ var cpuHz int64
+ // Parse text of the form "attribute = value" before the samples.
+ const delimiter = "="
+ for s.Scan() {
+ line := s.Text()
+ if line = strings.TrimSpace(line); isSpaceOrComment(line) {
+ continue
+ }
+ if strings.HasPrefix(line, "---") {
+ break
+ }
+ attr := strings.SplitN(line, delimiter, 2)
+ if len(attr) != 2 {
+ break
+ }
+ key, val := strings.TrimSpace(attr[0]), strings.TrimSpace(attr[1])
+ var err error
+ switch key {
+ case "cycles/second":
+ if cpuHz, err = strconv.ParseInt(val, 0, 64); err != nil {
+ return nil, errUnrecognized
+ }
+ case "sampling period":
+ if p.Period, err = strconv.ParseInt(val, 0, 64); err != nil {
+ return nil, errUnrecognized
+ }
+ case "ms since reset":
+ ms, err := strconv.ParseInt(val, 0, 64)
+ if err != nil {
+ return nil, errUnrecognized
+ }
+ p.DurationNanos = ms * 1000 * 1000
+ case "format":
+ // CPP contentionz profiles don't have format.
+ return nil, errUnrecognized
+ case "resolution":
+ // CPP contentionz profiles don't have resolution.
+ return nil, errUnrecognized
+ case "discarded samples":
+ default:
+ return nil, errUnrecognized
+ }
+ }
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+
+ locs := make(map[uint64]*Location)
+ for {
+ line := strings.TrimSpace(s.Text())
+ if strings.HasPrefix(line, "---") {
+ break
+ }
+ if !isSpaceOrComment(line) {
+ value, addrs, err := parseContentionSample(line, p.Period, cpuHz)
+ if err != nil {
+ return nil, err
+ }
+ var sloc []*Location
+ for _, addr := range addrs {
+ // Addresses from stack traces point to the next instruction after
+ // each call. Adjust by -1 to land somewhere on the actual call.
+ addr--
+ loc := locs[addr]
+ if locs[addr] == nil {
+ loc = &Location{
+ Address: addr,
+ }
+ p.Location = append(p.Location, loc)
+ locs[addr] = loc
+ }
+ sloc = append(sloc, loc)
+ }
+ p.Sample = append(p.Sample, &Sample{
+ Value: value,
+ Location: sloc,
+ })
+ }
+ if !s.Scan() {
+ break
+ }
+ }
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+
+ if err := parseAdditionalSections(s, p); err != nil {
+ return nil, err
+ }
+
+ return p, nil
+}
+
+// parseContentionSample parses a single row from a contention profile
+// into a new Sample.
+func parseContentionSample(line string, period, cpuHz int64) (value []int64, addrs []uint64, err error) {
+ sampleData := contentionSampleRE.FindStringSubmatch(line)
+ if sampleData == nil {
+ return nil, nil, errUnrecognized
+ }
+
+ v1, err := strconv.ParseInt(sampleData[1], 10, 64)
+ if err != nil {
+ return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err)
+ }
+ v2, err := strconv.ParseInt(sampleData[2], 10, 64)
+ if err != nil {
+ return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err)
+ }
+
+ // Unsample values if period and cpuHz are available.
+ // - Delays are scaled to cycles and then to nanoseconds.
+ // - Contentions are scaled to cycles.
+ if period > 0 {
+ if cpuHz > 0 {
+ cpuGHz := float64(cpuHz) / 1e9
+ v1 = int64(float64(v1) * float64(period) / cpuGHz)
+ }
+ v2 = v2 * period
+ }
+
+ value = []int64{v2, v1}
+ addrs, err = parseHexAddresses(sampleData[3])
+ if err != nil {
+ return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err)
+ }
+
+ return value, addrs, nil
+}
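+// Worked example (illustrative numbers): with "sampling period" = 100 and
+// "cycles/second" = 2e9 (cpuGHz = 2.0), a row recording v1 = 1000 delay
+// cycles and v2 = 3 contentions unsamples to 1000*100/2.0 = 50000
+// nanoseconds of delay and 3*100 = 300 contentions, returned as
+// value = []int64{300, 50000}.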
+
+// parseThread parses a Threadz profile and returns a new Profile.
+func parseThread(b []byte) (*Profile, error) {
+ s := bufio.NewScanner(bytes.NewBuffer(b))
+ // Skip past comments and empty lines seeking a real header.
+ for s.Scan() && isSpaceOrComment(s.Text()) {
+ }
+
+ line := s.Text()
+ if m := threadzStartRE.FindStringSubmatch(line); m != nil {
+ // Advance over initial comments until first stack trace.
+ for s.Scan() {
+ if line = s.Text(); isMemoryMapSentinel(line) || strings.HasPrefix(line, "-") {
+ break
+ }
+ }
+ } else if t := threadStartRE.FindStringSubmatch(line); len(t) != 4 {
+ return nil, errUnrecognized
+ }
+
+ p := &Profile{
+ SampleType: []*ValueType{{Type: "thread", Unit: "count"}},
+ PeriodType: &ValueType{Type: "thread", Unit: "count"},
+ Period: 1,
+ }
+
+ locs := make(map[uint64]*Location)
+ // Recognize each thread and populate profile samples.
+ for !isMemoryMapSentinel(line) {
+ if strings.HasPrefix(line, "---- no stack trace for") {
+ break
+ }
+ if t := threadStartRE.FindStringSubmatch(line); len(t) != 4 {
+ return nil, errUnrecognized
+ }
+
+ var addrs []uint64
+ var err error
+ line, addrs, err = parseThreadSample(s)
+ if err != nil {
+ return nil, err
+ }
+ if len(addrs) == 0 {
+ // We got a --same as previous threads--. Bump counters.
+ if len(p.Sample) > 0 {
+ s := p.Sample[len(p.Sample)-1]
+ s.Value[0]++
+ }
+ continue
+ }
+
+ var sloc []*Location
+ for i, addr := range addrs {
+ // Addresses from stack traces point to the next instruction after
+ // each call. Adjust by -1 to land somewhere on the actual call
+ // (except for the leaf, which is not a call).
+ if i > 0 {
+ addr--
+ }
+ loc := locs[addr]
+ if locs[addr] == nil {
+ loc = &Location{
+ Address: addr,
+ }
+ p.Location = append(p.Location, loc)
+ locs[addr] = loc
+ }
+ sloc = append(sloc, loc)
+ }
+
+ p.Sample = append(p.Sample, &Sample{
+ Value: []int64{1},
+ Location: sloc,
+ })
+ }
+
+ if err := parseAdditionalSections(s, p); err != nil {
+ return nil, err
+ }
+
+ cleanupDuplicateLocations(p)
+ return p, nil
+}
+
+// parseThreadSample parses a symbolized or unsymbolized stack trace.
+// Returns the first line after the traceback, the sample (or nil if
+// it hits a 'same-as-previous' marker) and an error.
+func parseThreadSample(s *bufio.Scanner) (nextl string, addrs []uint64, err error) {
+ var line string
+ sameAsPrevious := false
+ for s.Scan() {
+ line = strings.TrimSpace(s.Text())
+ if line == "" {
+ continue
+ }
+
+ if strings.HasPrefix(line, "---") {
+ break
+ }
+ if strings.Contains(line, "same as previous thread") {
+ sameAsPrevious = true
+ continue
+ }
+
+ curAddrs, err := parseHexAddresses(line)
+ if err != nil {
+ return "", nil, fmt.Errorf("malformed sample: %s: %v", line, err)
+ }
+ addrs = append(addrs, curAddrs...)
+ }
+ if err := s.Err(); err != nil {
+ return "", nil, err
+ }
+ if sameAsPrevious {
+ return line, nil, nil
+ }
+ return line, addrs, nil
+}
+
+// parseAdditionalSections parses any additional sections in the
+// profile, ignoring any unrecognized sections.
+func parseAdditionalSections(s *bufio.Scanner, p *Profile) error {
+ for !isMemoryMapSentinel(s.Text()) && s.Scan() {
+ }
+ if err := s.Err(); err != nil {
+ return err
+ }
+ return p.ParseMemoryMapFromScanner(s)
+}
+
+// ParseProcMaps parses a memory map in the format of /proc/self/maps.
+// ParseMemoryMap should then be called on the profile to associate its
+// locations with the corresponding mappings based on their addresses.
+func ParseProcMaps(rd io.Reader) ([]*Mapping, error) {
+ s := bufio.NewScanner(rd)
+ return parseProcMapsFromScanner(s)
+}
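+// Usage sketch (illustrative; error handling abbreviated):
+//
+//	f, err := os.Open("/proc/self/maps")
+//	if err != nil {
+//		return err
+//	}
+//	defer f.Close()
+//	mappings, err := ParseProcMaps(f)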
+
+func parseProcMapsFromScanner(s *bufio.Scanner) ([]*Mapping, error) {
+ var mapping []*Mapping
+
+ var attrs []string
+ const delimiter = "="
+ r := strings.NewReplacer()
+ for s.Scan() {
+ line := r.Replace(removeLoggingInfo(s.Text()))
+ m, err := parseMappingEntry(line)
+ if err != nil {
+ if err == errUnrecognized {
+ // Recognize assignments of the form: attr=value, and replace
+ // $attr with value on subsequent mappings.
+ if attr := strings.SplitN(line, delimiter, 2); len(attr) == 2 {
+ attrs = append(attrs, "$"+strings.TrimSpace(attr[0]), strings.TrimSpace(attr[1]))
+ r = strings.NewReplacer(attrs...)
+ }
+ // Ignore any unrecognized entries
+ continue
+ }
+ return nil, err
+ }
+ if m == nil {
+ continue
+ }
+ mapping = append(mapping, m)
+ }
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+ return mapping, nil
+}
+
+// removeLoggingInfo detects and removes log prefix entries generated
+// by the glog package. If no logging prefix is detected, the string
+// is returned unmodified.
+func removeLoggingInfo(line string) string {
+ if match := logInfoRE.FindStringIndex(line); match != nil {
+ return line[match[1]:]
+ }
+ return line
+}
+
+// ParseMemoryMap parses a memory map in the format of
+// /proc/self/maps, and overrides the mappings in the current profile.
+// It renumbers the samples and locations in the profile correspondingly.
+func (p *Profile) ParseMemoryMap(rd io.Reader) error {
+ return p.ParseMemoryMapFromScanner(bufio.NewScanner(rd))
+}
+
+// ParseMemoryMapFromScanner parses a memory map in the format of
+// /proc/self/maps or a variety of legacy format, and overrides the
+// mappings in the current profile. It renumbers the samples and
+// locations in the profile correspondingly.
+func (p *Profile) ParseMemoryMapFromScanner(s *bufio.Scanner) error {
+ mapping, err := parseProcMapsFromScanner(s)
+ if err != nil {
+ return err
+ }
+ p.Mapping = append(p.Mapping, mapping...)
+ p.massageMappings()
+ p.remapLocationIDs()
+ p.remapFunctionIDs()
+ p.remapMappingIDs()
+ return nil
+}
+
+func parseMappingEntry(l string) (*Mapping, error) {
+ var start, end, perm, file, offset, buildID string
+ if me := procMapsRE.FindStringSubmatch(l); len(me) == 6 {
+ start, end, perm, offset, file = me[1], me[2], me[3], me[4], me[5]
+ } else if me := briefMapsRE.FindStringSubmatch(l); len(me) == 7 {
+ start, end, perm, file, offset, buildID = me[1], me[2], me[3], me[4], me[5], me[6]
+ } else {
+ return nil, errUnrecognized
+ }
+
+ var err error
+ mapping := &Mapping{
+ File: file,
+ BuildID: buildID,
+ }
+ if perm != "" && !strings.Contains(perm, "x") {
+ // Skip non-executable entries.
+ return nil, nil
+ }
+ if mapping.Start, err = strconv.ParseUint(start, 16, 64); err != nil {
+ return nil, errUnrecognized
+ }
+ if mapping.Limit, err = strconv.ParseUint(end, 16, 64); err != nil {
+ return nil, errUnrecognized
+ }
+ if offset != "" {
+ if mapping.Offset, err = strconv.ParseUint(offset, 16, 64); err != nil {
+ return nil, errUnrecognized
+ }
+ }
+ return mapping, nil
+}
+
+var memoryMapSentinels = []string{
+ "--- Memory map: ---",
+ "MAPPED_LIBRARIES:",
+}
+
+// isMemoryMapSentinel returns true if the string contains one of the
+// known sentinels for memory map information.
+func isMemoryMapSentinel(line string) bool {
+ for _, s := range memoryMapSentinels {
+ if strings.Contains(line, s) {
+ return true
+ }
+ }
+ return false
+}
+
+func (p *Profile) addLegacyFrameInfo() {
+ switch {
+ case isProfileType(p, heapzSampleTypes):
+ p.DropFrames, p.KeepFrames = allocRxStr, allocSkipRxStr
+ case isProfileType(p, contentionzSampleTypes):
+ p.DropFrames, p.KeepFrames = lockRxStr, ""
+ default:
+ p.DropFrames, p.KeepFrames = cpuProfilerRxStr, ""
+ }
+}
+
+var heapzSampleTypes = [][]string{
+ {"allocations", "size"}, // early Go pprof profiles
+ {"objects", "space"},
+ {"inuse_objects", "inuse_space"},
+ {"alloc_objects", "alloc_space"},
+ {"alloc_objects", "alloc_space", "inuse_objects", "inuse_space"}, // Go pprof legacy profiles
+}
+var contentionzSampleTypes = [][]string{
+ {"contentions", "delay"},
+}
+
+func isProfileType(p *Profile, types [][]string) bool {
+ st := p.SampleType
+nextType:
+ for _, t := range types {
+ if len(st) != len(t) {
+ continue
+ }
+
+ for i := range st {
+ if st[i].Type != t[i] {
+ continue nextType
+ }
+ }
+ return true
+ }
+ return false
+}
+
+var allocRxStr = strings.Join([]string{
+ // POSIX entry points.
+ `calloc`,
+ `cfree`,
+ `malloc`,
+ `free`,
+ `memalign`,
+ `do_memalign`,
+ `(__)?posix_memalign`,
+ `pvalloc`,
+ `valloc`,
+ `realloc`,
+
+ // TC malloc.
+ `tcmalloc::.*`,
+ `tc_calloc`,
+ `tc_cfree`,
+ `tc_malloc`,
+ `tc_free`,
+ `tc_memalign`,
+ `tc_posix_memalign`,
+ `tc_pvalloc`,
+ `tc_valloc`,
+ `tc_realloc`,
+ `tc_new`,
+ `tc_delete`,
+ `tc_newarray`,
+ `tc_deletearray`,
+ `tc_new_nothrow`,
+ `tc_newarray_nothrow`,
+
+ // Memory-allocation routines on OS X.
+ `malloc_zone_malloc`,
+ `malloc_zone_calloc`,
+ `malloc_zone_valloc`,
+ `malloc_zone_realloc`,
+ `malloc_zone_memalign`,
+ `malloc_zone_free`,
+
+ // Go runtime
+ `runtime\..*`,
+
+ // Other misc. memory allocation routines
+ `BaseArena::.*`,
+ `(::)?do_malloc_no_errno`,
+ `(::)?do_malloc_pages`,
+ `(::)?do_malloc`,
+ `DoSampledAllocation`,
+ `MallocedMemBlock::MallocedMemBlock`,
+ `_M_allocate`,
+ `__builtin_(vec_)?delete`,
+ `__builtin_(vec_)?new`,
+ `__gnu_cxx::new_allocator::allocate`,
+ `__libc_malloc`,
+ `__malloc_alloc_template::allocate`,
+ `allocate`,
+ `cpp_alloc`,
+ `operator new(\[\])?`,
+ `simple_alloc::allocate`,
+}, `|`)
+
+var allocSkipRxStr = strings.Join([]string{
+ // Preserve Go runtime frames that appear in the middle/bottom of
+ // the stack.
+ `runtime\.panic`,
+ `runtime\.reflectcall`,
+ `runtime\.call[0-9]*`,
+}, `|`)
+
+var cpuProfilerRxStr = strings.Join([]string{
+ `ProfileData::Add`,
+ `ProfileData::prof_handler`,
+ `CpuProfiler::prof_handler`,
+ `__pthread_sighandler`,
+ `__restore`,
+}, `|`)
+
+var lockRxStr = strings.Join([]string{
+ `RecordLockProfileData`,
+ `(base::)?RecordLockProfileData.*`,
+ `(base::)?SubmitMutexProfileData.*`,
+ `(base::)?SubmitSpinLockProfileData.*`,
+ `(base::Mutex::)?AwaitCommon.*`,
+ `(base::Mutex::)?Unlock.*`,
+ `(base::Mutex::)?UnlockSlow.*`,
+ `(base::Mutex::)?ReaderUnlock.*`,
+ `(base::MutexLock::)?~MutexLock.*`,
+ `(Mutex::)?AwaitCommon.*`,
+ `(Mutex::)?Unlock.*`,
+ `(Mutex::)?UnlockSlow.*`,
+ `(Mutex::)?ReaderUnlock.*`,
+ `(MutexLock::)?~MutexLock.*`,
+ `(SpinLock::)?Unlock.*`,
+ `(SpinLock::)?SlowUnlock.*`,
+ `(SpinLockHolder::)?~SpinLockHolder.*`,
+}, `|`)
diff --git a/vendor/github.com/google/pprof/profile/merge.go b/vendor/github.com/google/pprof/profile/merge.go
new file mode 100644
index 000000000..ba4d74640
--- /dev/null
+++ b/vendor/github.com/google/pprof/profile/merge.go
@@ -0,0 +1,674 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package profile
+
+import (
+ "encoding/binary"
+ "fmt"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// Compact performs garbage collection on a profile to remove any
+// unreferenced fields. This is useful to reduce the size of a profile
+// after samples or locations have been removed.
+func (p *Profile) Compact() *Profile {
+ p, _ = Merge([]*Profile{p})
+ return p
+}
+
+// Merge merges all the profiles in profs into a single Profile.
+// Returns a new profile independent of the input profiles. The merged
+// profile is compacted to eliminate unused samples, locations,
+// functions and mappings. Profiles must have identical profile sample
+// and period types or the merge will fail. profile.Period of the
+// resulting profile will be the maximum of all profiles, and
+// profile.TimeNanos will be the earliest nonzero one. Merges are
+// associative with the caveat of the first profile having some
+// specialization in how headers are combined. There may be other
+// subtleties now or in the future regarding associativity.
+func Merge(srcs []*Profile) (*Profile, error) {
+ if len(srcs) == 0 {
+ return nil, fmt.Errorf("no profiles to merge")
+ }
+ p, err := combineHeaders(srcs)
+ if err != nil {
+ return nil, err
+ }
+
+ pm := &profileMerger{
+ p: p,
+ samples: make(map[sampleKey]*Sample, len(srcs[0].Sample)),
+ locations: make(map[locationKey]*Location, len(srcs[0].Location)),
+ functions: make(map[functionKey]*Function, len(srcs[0].Function)),
+ mappings: make(map[mappingKey]*Mapping, len(srcs[0].Mapping)),
+ }
+
+ for _, src := range srcs {
+ // Clear the profile-specific hash tables
+ pm.locationsByID = makeLocationIDMap(len(src.Location))
+ pm.functionsByID = make(map[uint64]*Function, len(src.Function))
+ pm.mappingsByID = make(map[uint64]mapInfo, len(src.Mapping))
+
+ if len(pm.mappings) == 0 && len(src.Mapping) > 0 {
+ // The Mapping list has the property that the first mapping
+ // represents the main binary. Take the first Mapping we see,
+ // otherwise the operations below will add mappings in an
+ // arbitrary order.
+ pm.mapMapping(src.Mapping[0])
+ }
+
+ for _, s := range src.Sample {
+ if !isZeroSample(s) {
+ pm.mapSample(s)
+ }
+ }
+ }
+
+ for _, s := range p.Sample {
+ if isZeroSample(s) {
+ // If there are any zero samples, re-merge the profile to GC
+ // them.
+ return Merge([]*Profile{p})
+ }
+ }
+
+ return p, nil
+}
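+// Usage sketch (illustrative; p1 and p2 are hypothetical profiles with
+// identical sample and period types):
+//
+//	merged, err := Merge([]*Profile{p1, p2})
+//	if err != nil {
+//		// incompatible sample/period types or empty input
+//	}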
+
+// Normalize normalizes the source profile by multiplying each value in profile by the
+// ratio of the sum of the base profile's values of that sample type to the sum of the
+// source profile's value of that sample type.
+func (p *Profile) Normalize(pb *Profile) error {
+
+ if err := p.compatible(pb); err != nil {
+ return err
+ }
+
+ baseVals := make([]int64, len(p.SampleType))
+ for _, s := range pb.Sample {
+ for i, v := range s.Value {
+ baseVals[i] += v
+ }
+ }
+
+ srcVals := make([]int64, len(p.SampleType))
+ for _, s := range p.Sample {
+ for i, v := range s.Value {
+ srcVals[i] += v
+ }
+ }
+
+ normScale := make([]float64, len(baseVals))
+ for i := range baseVals {
+ if srcVals[i] == 0 {
+ normScale[i] = 0.0
+ } else {
+ normScale[i] = float64(baseVals[i]) / float64(srcVals[i])
+ }
+ }
+ p.ScaleN(normScale)
+ return nil
+}
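+// Usage sketch (illustrative; pb is a hypothetical base profile): scale p so
+// that its per-type value sums match pb's, e.g. before computing a diff:
+//
+//	if err := p.Normalize(pb); err != nil {
+//		// profiles had incompatible sample/period types
+//	}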
+
+func isZeroSample(s *Sample) bool {
+ for _, v := range s.Value {
+ if v != 0 {
+ return false
+ }
+ }
+ return true
+}
+
+type profileMerger struct {
+ p *Profile
+
+ // Memoization tables within a profile.
+ locationsByID locationIDMap
+ functionsByID map[uint64]*Function
+ mappingsByID map[uint64]mapInfo
+
+ // Memoization tables for profile entities.
+ samples map[sampleKey]*Sample
+ locations map[locationKey]*Location
+ functions map[functionKey]*Function
+ mappings map[mappingKey]*Mapping
+}
+
+type mapInfo struct {
+ m *Mapping
+ offset int64
+}
+
+func (pm *profileMerger) mapSample(src *Sample) *Sample {
+ // Check memoization table
+ k := pm.sampleKey(src)
+ if ss, ok := pm.samples[k]; ok {
+ for i, v := range src.Value {
+ ss.Value[i] += v
+ }
+ return ss
+ }
+
+ // Make new sample.
+ s := &Sample{
+ Location: make([]*Location, len(src.Location)),
+ Value: make([]int64, len(src.Value)),
+ Label: make(map[string][]string, len(src.Label)),
+ NumLabel: make(map[string][]int64, len(src.NumLabel)),
+ NumUnit: make(map[string][]string, len(src.NumLabel)),
+ }
+ for i, l := range src.Location {
+ s.Location[i] = pm.mapLocation(l)
+ }
+ for k, v := range src.Label {
+ vv := make([]string, len(v))
+ copy(vv, v)
+ s.Label[k] = vv
+ }
+ for k, v := range src.NumLabel {
+ u := src.NumUnit[k]
+ vv := make([]int64, len(v))
+ uu := make([]string, len(u))
+ copy(vv, v)
+ copy(uu, u)
+ s.NumLabel[k] = vv
+ s.NumUnit[k] = uu
+ }
+ copy(s.Value, src.Value)
+ pm.samples[k] = s
+ pm.p.Sample = append(pm.p.Sample, s)
+ return s
+}
+
+func (pm *profileMerger) sampleKey(sample *Sample) sampleKey {
+ // Accumulate contents into a string.
+ var buf strings.Builder
+ buf.Grow(64) // Heuristic to avoid extra allocs
+
+ // encode a number
+ putNumber := func(v uint64) {
+ var num [binary.MaxVarintLen64]byte
+ n := binary.PutUvarint(num[:], v)
+ buf.Write(num[:n])
+ }
+
+ // encode a string prefixed with its length.
+ putDelimitedString := func(s string) {
+ putNumber(uint64(len(s)))
+ buf.WriteString(s)
+ }
+
+ for _, l := range sample.Location {
+ // Get the location in the merged profile, which may have a different ID.
+ if loc := pm.mapLocation(l); loc != nil {
+ putNumber(loc.ID)
+ }
+ }
+ putNumber(0) // Delimiter
+
+ for _, l := range sortedKeys1(sample.Label) {
+ putDelimitedString(l)
+ values := sample.Label[l]
+ putNumber(uint64(len(values)))
+ for _, v := range values {
+ putDelimitedString(v)
+ }
+ }
+
+ for _, l := range sortedKeys2(sample.NumLabel) {
+ putDelimitedString(l)
+ values := sample.NumLabel[l]
+ putNumber(uint64(len(values)))
+ for _, v := range values {
+ putNumber(uint64(v))
+ }
+ units := sample.NumUnit[l]
+ putNumber(uint64(len(units)))
+ for _, v := range units {
+ putDelimitedString(v)
+ }
+ }
+
+ return sampleKey(buf.String())
+}
+
+type sampleKey string
+
+// sortedKeys1 returns the sorted keys found in a string->[]string map.
+//
+// Note: this is currently non-generic since github pprof runs golint,
+// which does not support generics. When that issue is fixed, it can
+// be merged with sortedKeys2 and made into a generic function.
+func sortedKeys1(m map[string][]string) []string {
+ if len(m) == 0 {
+ return nil
+ }
+ keys := make([]string, 0, len(m))
+ for k := range m {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ return keys
+}
+
+// sortedKeys2 returns the sorted keys found in a string->[]int64 map.
+//
+// Note: this is currently non-generic since github pprof runs golint,
+// which does not support generics. When that issue is fixed, it can
+// be merged with sortedKeys1 and made into a generic function.
+func sortedKeys2(m map[string][]int64) []string {
+ if len(m) == 0 {
+ return nil
+ }
+ keys := make([]string, 0, len(m))
+ for k := range m {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ return keys
+}
+
+func (pm *profileMerger) mapLocation(src *Location) *Location {
+ if src == nil {
+ return nil
+ }
+
+ if l := pm.locationsByID.get(src.ID); l != nil {
+ return l
+ }
+
+ mi := pm.mapMapping(src.Mapping)
+ l := &Location{
+ ID: uint64(len(pm.p.Location) + 1),
+ Mapping: mi.m,
+ Address: uint64(int64(src.Address) + mi.offset),
+ Line: make([]Line, len(src.Line)),
+ IsFolded: src.IsFolded,
+ }
+ for i, ln := range src.Line {
+ l.Line[i] = pm.mapLine(ln)
+ }
+ // Check memoization table. Must be done on the remapped location to
+ // account for the remapped mapping ID.
+ k := l.key()
+ if ll, ok := pm.locations[k]; ok {
+ pm.locationsByID.set(src.ID, ll)
+ return ll
+ }
+ pm.locationsByID.set(src.ID, l)
+ pm.locations[k] = l
+ pm.p.Location = append(pm.p.Location, l)
+ return l
+}
+
+// key generates locationKey to be used as a key for maps.
+func (l *Location) key() locationKey {
+ key := locationKey{
+ addr: l.Address,
+ isFolded: l.IsFolded,
+ }
+ if l.Mapping != nil {
+ // Normalizes address to handle address space randomization.
+ key.addr -= l.Mapping.Start
+ key.mappingID = l.Mapping.ID
+ }
+ // Encode function ID, line and column for each Line, three slots per entry.
+ lines := make([]string, len(l.Line)*3)
+ for i, line := range l.Line {
+ if line.Function != nil {
+ lines[i*3] = strconv.FormatUint(line.Function.ID, 16)
+ }
+ lines[i*3+1] = strconv.FormatInt(line.Line, 16)
+ lines[i*3+2] = strconv.FormatInt(line.Column, 16)
+ }
+ key.lines = strings.Join(lines, "|")
+ return key
+}
+
+type locationKey struct {
+ addr, mappingID uint64
+ lines string
+ isFolded bool
+}
+
+func (pm *profileMerger) mapMapping(src *Mapping) mapInfo {
+ if src == nil {
+ return mapInfo{}
+ }
+
+ if mi, ok := pm.mappingsByID[src.ID]; ok {
+ return mi
+ }
+
+ // Check memoization tables.
+ mk := src.key()
+ if m, ok := pm.mappings[mk]; ok {
+ mi := mapInfo{m, int64(m.Start) - int64(src.Start)}
+ pm.mappingsByID[src.ID] = mi
+ return mi
+ }
+ m := &Mapping{
+ ID: uint64(len(pm.p.Mapping) + 1),
+ Start: src.Start,
+ Limit: src.Limit,
+ Offset: src.Offset,
+ File: src.File,
+ KernelRelocationSymbol: src.KernelRelocationSymbol,
+ BuildID: src.BuildID,
+ HasFunctions: src.HasFunctions,
+ HasFilenames: src.HasFilenames,
+ HasLineNumbers: src.HasLineNumbers,
+ HasInlineFrames: src.HasInlineFrames,
+ }
+ pm.p.Mapping = append(pm.p.Mapping, m)
+
+ // Update memoization tables.
+ pm.mappings[mk] = m
+ mi := mapInfo{m, 0}
+ pm.mappingsByID[src.ID] = mi
+ return mi
+}
+
+// key generates encoded strings of Mapping to be used as a key for
+// maps.
+func (m *Mapping) key() mappingKey {
+ // Normalize addresses to handle address space randomization.
+ // Round up to next 4K boundary to avoid minor discrepancies.
+ const mapsizeRounding = 0x1000
+
+ size := m.Limit - m.Start
+ size = size + mapsizeRounding - 1
+ size = size - (size % mapsizeRounding)
+ key := mappingKey{
+ size: size,
+ offset: m.Offset,
+ }
+
+ switch {
+ case m.BuildID != "":
+ key.buildIDOrFile = m.BuildID
+ case m.File != "":
+ key.buildIDOrFile = m.File
+ default:
+ // A mapping containing neither build ID nor file name is a fake mapping. A
+ // key with empty buildIDOrFile is used for fake mappings so that they are
+ // treated as the same mapping during merging.
+ }
+ return key
+}
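+// Worked example (illustrative numbers): a mapping with Start = 0x400000 and
+// Limit = 0x401f00 has size 0x1f00; rounding up gives
+// (0x1f00+0xfff) - ((0x1f00+0xfff)%0x1000) = 0x2000. Because the key uses
+// only the rounded size, offset and build ID/file, two ASLR-shifted copies
+// of the same segment hash to the same key.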
+
+type mappingKey struct {
+ size, offset uint64
+ buildIDOrFile string
+}
+
+func (pm *profileMerger) mapLine(src Line) Line {
+ ln := Line{
+ Function: pm.mapFunction(src.Function),
+ Line: src.Line,
+ Column: src.Column,
+ }
+ return ln
+}
+
+func (pm *profileMerger) mapFunction(src *Function) *Function {
+ if src == nil {
+ return nil
+ }
+ if f, ok := pm.functionsByID[src.ID]; ok {
+ return f
+ }
+ k := src.key()
+ if f, ok := pm.functions[k]; ok {
+ pm.functionsByID[src.ID] = f
+ return f
+ }
+ f := &Function{
+ ID: uint64(len(pm.p.Function) + 1),
+ Name: src.Name,
+ SystemName: src.SystemName,
+ Filename: src.Filename,
+ StartLine: src.StartLine,
+ }
+ pm.functions[k] = f
+ pm.functionsByID[src.ID] = f
+ pm.p.Function = append(pm.p.Function, f)
+ return f
+}
+
+// key generates a struct to be used as a key for maps.
+func (f *Function) key() functionKey {
+ return functionKey{
+ f.StartLine,
+ f.Name,
+ f.SystemName,
+ f.Filename,
+ }
+}
+
+type functionKey struct {
+ startLine int64
+ name, systemName, fileName string
+}
+
+// combineHeaders checks that all profiles can be merged and returns
+// their combined profile.
+func combineHeaders(srcs []*Profile) (*Profile, error) {
+ for _, s := range srcs[1:] {
+ if err := srcs[0].compatible(s); err != nil {
+ return nil, err
+ }
+ }
+
+ var timeNanos, durationNanos, period int64
+ var comments []string
+ seenComments := map[string]bool{}
+ var docURL string
+ var defaultSampleType string
+ for _, s := range srcs {
+ if timeNanos == 0 || s.TimeNanos < timeNanos {
+ timeNanos = s.TimeNanos
+ }
+ durationNanos += s.DurationNanos
+ if period == 0 || period < s.Period {
+ period = s.Period
+ }
+ for _, c := range s.Comments {
+ if seen := seenComments[c]; !seen {
+ comments = append(comments, c)
+ seenComments[c] = true
+ }
+ }
+ if defaultSampleType == "" {
+ defaultSampleType = s.DefaultSampleType
+ }
+ if docURL == "" {
+ docURL = s.DocURL
+ }
+ }
+
+ p := &Profile{
+ SampleType: make([]*ValueType, len(srcs[0].SampleType)),
+
+ DropFrames: srcs[0].DropFrames,
+ KeepFrames: srcs[0].KeepFrames,
+
+ TimeNanos: timeNanos,
+ DurationNanos: durationNanos,
+ PeriodType: srcs[0].PeriodType,
+ Period: period,
+
+ Comments: comments,
+ DefaultSampleType: defaultSampleType,
+ DocURL: docURL,
+ }
+ copy(p.SampleType, srcs[0].SampleType)
+ return p, nil
+}
+
+// compatible determines if two profiles can be compared/merged.
+// It returns nil if the profiles are compatible; otherwise it returns an
+// error with details on the incompatibility.
+func (p *Profile) compatible(pb *Profile) error {
+ if !equalValueType(p.PeriodType, pb.PeriodType) {
+ return fmt.Errorf("incompatible period types %v and %v", p.PeriodType, pb.PeriodType)
+ }
+
+ if len(p.SampleType) != len(pb.SampleType) {
+ return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType)
+ }
+
+ for i := range p.SampleType {
+ if !equalValueType(p.SampleType[i], pb.SampleType[i]) {
+ return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType)
+ }
+ }
+ return nil
+}
+
+// equalValueType returns true if the two value types are semantically
+// equal. It ignores the internal fields used during encode/decode.
+func equalValueType(st1, st2 *ValueType) bool {
+ return st1.Type == st2.Type && st1.Unit == st2.Unit
+}
+
+// locationIDMap is like a map[uint64]*Location, but provides efficiency for
+// ids that are densely numbered, which is often the case.
+type locationIDMap struct {
+ dense []*Location // indexed by id for id < len(dense)
+ sparse map[uint64]*Location // indexed by id for id >= len(dense)
+}
+
+func makeLocationIDMap(n int) locationIDMap {
+ return locationIDMap{
+ dense: make([]*Location, n),
+ sparse: map[uint64]*Location{},
+ }
+}
+
+func (lm locationIDMap) get(id uint64) *Location {
+ if id < uint64(len(lm.dense)) {
+ return lm.dense[int(id)]
+ }
+ return lm.sparse[id]
+}
+
+func (lm locationIDMap) set(id uint64, loc *Location) {
+ if id < uint64(len(lm.dense)) {
+ lm.dense[id] = loc
+ return
+ }
+ lm.sparse[id] = loc
+}
+
+// CompatibilizeSampleTypes makes profiles compatible to be compared/merged. It
+// keeps sample types that appear in all profiles only and drops/reorders the
+// sample types as necessary.
+//
+// If the sample type order is not the same across the given profiles, the
+// order is derived from the first profile.
+//
+// Profiles are modified in-place.
+//
+// It returns an error if the sample type's intersection is empty.
+func CompatibilizeSampleTypes(ps []*Profile) error {
+ sTypes := commonSampleTypes(ps)
+ if len(sTypes) == 0 {
+ return fmt.Errorf("profiles have empty common sample type list")
+ }
+ for _, p := range ps {
+ if err := compatibilizeSampleTypes(p, sTypes); err != nil {
+ return err
+ }
+ }
+ return nil
+}
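+// Usage sketch (illustrative; p1 and p2 are hypothetical profiles):
+//
+//	ps := []*Profile{p1, p2}
+//	if err := CompatibilizeSampleTypes(ps); err != nil {
+//		// no common sample types
+//	}
+//	merged, err := Merge(ps)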
+
+// commonSampleTypes returns the sample types that appear in all profiles, in
+// the order in which they appear in the first profile.
+func commonSampleTypes(ps []*Profile) []string {
+ if len(ps) == 0 {
+ return nil
+ }
+ sTypes := map[string]int{}
+ for _, p := range ps {
+ for _, st := range p.SampleType {
+ sTypes[st.Type]++
+ }
+ }
+ var res []string
+ for _, st := range ps[0].SampleType {
+ if sTypes[st.Type] == len(ps) {
+ res = append(res, st.Type)
+ }
+ }
+ return res
+}
+
+// compatibilizeSampleTypes drops sample types that are not present in the
+// sTypes list and reorders the remaining ones if needed.
+//
+// It sets DefaultSampleType to sTypes[0] if the existing default is not in
+// the sTypes list.
+//
+// It assumes that all sample types from the sTypes list are present in the
+// given profile; otherwise it returns an error.
+func compatibilizeSampleTypes(p *Profile, sTypes []string) error {
+ if len(sTypes) == 0 {
+ return fmt.Errorf("sample type list is empty")
+ }
+ defaultSampleType := sTypes[0]
+ reMap, needToModify := make([]int, len(sTypes)), false
+ for i, st := range sTypes {
+ if st == p.DefaultSampleType {
+ defaultSampleType = p.DefaultSampleType
+ }
+ idx := searchValueType(p.SampleType, st)
+ if idx < 0 {
+ return fmt.Errorf("%q sample type is not found in profile", st)
+ }
+ reMap[i] = idx
+ if idx != i {
+ needToModify = true
+ }
+ }
+ if !needToModify && len(sTypes) == len(p.SampleType) {
+ return nil
+ }
+ p.DefaultSampleType = defaultSampleType
+ oldSampleTypes := p.SampleType
+ p.SampleType = make([]*ValueType, len(sTypes))
+ for i, idx := range reMap {
+ p.SampleType[i] = oldSampleTypes[idx]
+ }
+ values := make([]int64, len(sTypes))
+ for _, s := range p.Sample {
+ for i, idx := range reMap {
+ values[i] = s.Value[idx]
+ }
+ s.Value = s.Value[:len(values)]
+ copy(s.Value, values)
+ }
+ return nil
+}
+
+func searchValueType(vts []*ValueType, s string) int {
+ for i, vt := range vts {
+ if vt.Type == s {
+ return i
+ }
+ }
+ return -1
+}
diff --git a/vendor/github.com/google/pprof/profile/profile.go b/vendor/github.com/google/pprof/profile/profile.go
new file mode 100644
index 000000000..f47a24390
--- /dev/null
+++ b/vendor/github.com/google/pprof/profile/profile.go
@@ -0,0 +1,869 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package profile provides a representation of profile.proto and
+// methods to encode/decode profiles in this format.
+package profile
+
+import (
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "io"
+ "math"
+ "path/filepath"
+ "regexp"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+)
+
+// Profile is an in-memory representation of profile.proto.
+type Profile struct {
+ SampleType []*ValueType
+ DefaultSampleType string
+ Sample []*Sample
+ Mapping []*Mapping
+ Location []*Location
+ Function []*Function
+ Comments []string
+ DocURL string
+
+ DropFrames string
+ KeepFrames string
+
+ TimeNanos int64
+ DurationNanos int64
+ PeriodType *ValueType
+ Period int64
+
+ // The following fields are modified during encoding and copying,
+ // so are protected by a Mutex.
+ encodeMu sync.Mutex
+
+ commentX []int64
+ docURLX int64
+ dropFramesX int64
+ keepFramesX int64
+ stringTable []string
+ defaultSampleTypeX int64
+}
+
+// ValueType corresponds to Profile.ValueType
+type ValueType struct {
+ Type string // cpu, wall, inuse_space, etc
+ Unit string // seconds, nanoseconds, bytes, etc
+
+ typeX int64
+ unitX int64
+}
+
+// Sample corresponds to Profile.Sample
+type Sample struct {
+ Location []*Location
+ Value []int64
+ // Label is a per-label-key map to values for string labels.
+ //
+ // In general, having multiple values for the given label key is strongly
+ // discouraged - see docs for the sample label field in profile.proto. The
+ // main reason this unlikely state is tracked here is to make the
+ // decoding->encoding roundtrip not lossy. But we expect that the value
+ // slices present in this map are always of length 1.
+ Label map[string][]string
+ // NumLabel is a per-label-key map to values for numeric labels. See a note
+ // above on handling multiple values for a label.
+ NumLabel map[string][]int64
+ // NumUnit is a per-label-key map to the unit names of corresponding numeric
+ // label values. The unit info may be missing even if the label is in
+ // NumLabel, see the docs in profile.proto for details. When the value
+ // slice is present and not nil, its length must be equal to the length of
+ // the corresponding value slice in NumLabel.
+ NumUnit map[string][]string
+
+ locationIDX []uint64
+ labelX []label
+}
+
+// label corresponds to Profile.Label
+type label struct {
+ keyX int64
+ // Exactly one of the two following values must be set
+ strX int64
+ numX int64 // Integer value for this label
+ // can be set if numX has value
+ unitX int64
+}
+
+// Mapping corresponds to Profile.Mapping
+type Mapping struct {
+ ID uint64
+ Start uint64
+ Limit uint64
+ Offset uint64
+ File string
+ BuildID string
+ HasFunctions bool
+ HasFilenames bool
+ HasLineNumbers bool
+ HasInlineFrames bool
+
+ fileX int64
+ buildIDX int64
+
+ // Name of the kernel relocation symbol ("_text" or "_stext"), extracted from File.
+ // For linux kernel mappings generated by some tools, correct symbolization depends
+ // on knowing which of the two possible relocation symbols was used for `Start`.
+ // This is given to us as a suffix in `File` (e.g. "[kernel.kallsyms]_stext").
+ //
+ // Note, this public field is not persisted in the proto. For the purposes of
+ // copying / merging / hashing profiles, it is considered subsumed by `File`.
+ KernelRelocationSymbol string
+}
+
+// Location corresponds to Profile.Location
+type Location struct {
+ ID uint64
+ Mapping *Mapping
+ Address uint64
+ Line []Line
+ IsFolded bool
+
+ mappingIDX uint64
+}
+
+// Line corresponds to Profile.Line
+type Line struct {
+ Function *Function
+ Line int64
+ Column int64
+
+ functionIDX uint64
+}
+
+// Function corresponds to Profile.Function
+type Function struct {
+ ID uint64
+ Name string
+ SystemName string
+ Filename string
+ StartLine int64
+
+ nameX int64
+ systemNameX int64
+ filenameX int64
+}
+
+// Parse parses a profile and checks for its validity. The input
+// may be a gzip-compressed encoded protobuf or one of many legacy
+// profile formats which may be unsupported in the future.
+func Parse(r io.Reader) (*Profile, error) {
+ data, err := io.ReadAll(r)
+ if err != nil {
+ return nil, err
+ }
+ return ParseData(data)
+}
+
+// ParseData parses a profile from a buffer and checks for its
+// validity.
+func ParseData(data []byte) (*Profile, error) {
+ var p *Profile
+ var err error
+ if len(data) >= 2 && data[0] == 0x1f && data[1] == 0x8b {
+ gz, err := gzip.NewReader(bytes.NewBuffer(data))
+ if err == nil {
+ data, err = io.ReadAll(gz)
+ }
+ if err != nil {
+ return nil, fmt.Errorf("decompressing profile: %v", err)
+ }
+ }
+ if p, err = ParseUncompressed(data); err != nil && err != errNoData && err != errConcatProfile {
+ p, err = parseLegacy(data)
+ }
+
+ if err != nil {
+ return nil, fmt.Errorf("parsing profile: %v", err)
+ }
+
+ if err := p.CheckValid(); err != nil {
+ return nil, fmt.Errorf("malformed profile: %v", err)
+ }
+ return p, nil
+}
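+// Usage sketch (illustrative; "cpu.pb.gz" is a hypothetical file name):
+//
+//	f, err := os.Open("cpu.pb.gz")
+//	if err != nil {
+//		return err
+//	}
+//	defer f.Close()
+//	p, err := Parse(f) // accepts gzip-compressed and raw inputs alike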
+
+var errUnrecognized = fmt.Errorf("unrecognized profile format")
+var errMalformed = fmt.Errorf("malformed profile format")
+var errNoData = fmt.Errorf("empty input file")
+var errConcatProfile = fmt.Errorf("concatenated profiles detected")
+
+func parseLegacy(data []byte) (*Profile, error) {
+ parsers := []func([]byte) (*Profile, error){
+ parseCPU,
+ parseHeap,
+ parseGoCount, // goroutine, threadcreate
+ parseThread,
+ parseContention,
+ parseJavaProfile,
+ }
+
+ for _, parser := range parsers {
+ p, err := parser(data)
+ if err == nil {
+ p.addLegacyFrameInfo()
+ return p, nil
+ }
+ if err != errUnrecognized {
+ return nil, err
+ }
+ }
+ return nil, errUnrecognized
+}
+
+// ParseUncompressed parses an uncompressed protobuf into a profile.
+func ParseUncompressed(data []byte) (*Profile, error) {
+ if len(data) == 0 {
+ return nil, errNoData
+ }
+ p := &Profile{}
+ if err := unmarshal(data, p); err != nil {
+ return nil, err
+ }
+
+ if err := p.postDecode(); err != nil {
+ return nil, err
+ }
+
+ return p, nil
+}
+
+var libRx = regexp.MustCompile(`([.]so$|[.]so[._][0-9]+)`)
+
+// massageMappings applies heuristic-based changes to the profile
+// mappings to account for quirks of some environments.
+func (p *Profile) massageMappings() {
+ // Merge adjacent regions with matching names, checking that the offsets match
+ if len(p.Mapping) > 1 {
+ mappings := []*Mapping{p.Mapping[0]}
+ for _, m := range p.Mapping[1:] {
+ lm := mappings[len(mappings)-1]
+ if adjacent(lm, m) {
+ lm.Limit = m.Limit
+ if m.File != "" {
+ lm.File = m.File
+ }
+ if m.BuildID != "" {
+ lm.BuildID = m.BuildID
+ }
+ p.updateLocationMapping(m, lm)
+ continue
+ }
+ mappings = append(mappings, m)
+ }
+ p.Mapping = mappings
+ }
+
+ // Use heuristics to identify main binary and move it to the top of the list of mappings
+ for i, m := range p.Mapping {
+ file := strings.TrimSpace(strings.Replace(m.File, "(deleted)", "", -1))
+ if len(file) == 0 {
+ continue
+ }
+ if len(libRx.FindStringSubmatch(file)) > 0 {
+ continue
+ }
+ if file[0] == '[' {
+ continue
+ }
+ // Swap what we guess is main to position 0.
+ p.Mapping[0], p.Mapping[i] = p.Mapping[i], p.Mapping[0]
+ break
+ }
+
+ // Keep the mapping IDs neatly sorted
+ for i, m := range p.Mapping {
+ m.ID = uint64(i + 1)
+ }
+}
+
+// adjacent returns whether two mapping entries represent the same
+// mapping that has been split into two. It checks that their addresses are
+// adjacent, and that their offsets match when offsets are available.
+func adjacent(m1, m2 *Mapping) bool {
+ if m1.File != "" && m2.File != "" {
+ if m1.File != m2.File {
+ return false
+ }
+ }
+ if m1.BuildID != "" && m2.BuildID != "" {
+ if m1.BuildID != m2.BuildID {
+ return false
+ }
+ }
+ if m1.Limit != m2.Start {
+ return false
+ }
+ if m1.Offset != 0 && m2.Offset != 0 {
+ offset := m1.Offset + (m1.Limit - m1.Start)
+ if offset != m2.Offset {
+ return false
+ }
+ }
+ return true
+}
+
+func (p *Profile) updateLocationMapping(from, to *Mapping) {
+ for _, l := range p.Location {
+ if l.Mapping == from {
+ l.Mapping = to
+ }
+ }
+}
+
+func serialize(p *Profile) []byte {
+ p.encodeMu.Lock()
+ p.preEncode()
+ b := marshal(p)
+ p.encodeMu.Unlock()
+ return b
+}
+
+// Write writes the profile as a gzip-compressed marshaled protobuf.
+func (p *Profile) Write(w io.Writer) error {
+ zw := gzip.NewWriter(w)
+ defer zw.Close()
+ _, err := zw.Write(serialize(p))
+ return err
+}
+
+// WriteUncompressed writes the profile as a marshaled protobuf.
+func (p *Profile) WriteUncompressed(w io.Writer) error {
+ _, err := w.Write(serialize(p))
+ return err
+}
+
+// CheckValid tests whether the profile is valid. Checks include, but are
+// not limited to:
+// - len(Profile.Sample[n].value) == len(Profile.value_unit)
+// - Sample.id has a corresponding Profile.Location
+func (p *Profile) CheckValid() error {
+ // Check that sample values are consistent
+ sampleLen := len(p.SampleType)
+ if sampleLen == 0 && len(p.Sample) != 0 {
+ return fmt.Errorf("missing sample type information")
+ }
+ for _, s := range p.Sample {
+ if s == nil {
+ return fmt.Errorf("profile has nil sample")
+ }
+ if len(s.Value) != sampleLen {
+ return fmt.Errorf("mismatch: sample has %d values vs. %d types", len(s.Value), len(p.SampleType))
+ }
+ for _, l := range s.Location {
+ if l == nil {
+ return fmt.Errorf("sample has nil location")
+ }
+ }
+ }
+
+ // Check that all mappings/locations/functions are in the tables
+ // Check that there are no duplicate ids
+ mappings := make(map[uint64]*Mapping, len(p.Mapping))
+ for _, m := range p.Mapping {
+ if m == nil {
+ return fmt.Errorf("profile has nil mapping")
+ }
+ if m.ID == 0 {
+ return fmt.Errorf("found mapping with reserved ID=0")
+ }
+ if mappings[m.ID] != nil {
+ return fmt.Errorf("multiple mappings with same id: %d", m.ID)
+ }
+ mappings[m.ID] = m
+ }
+ functions := make(map[uint64]*Function, len(p.Function))
+ for _, f := range p.Function {
+ if f == nil {
+ return fmt.Errorf("profile has nil function")
+ }
+ if f.ID == 0 {
+ return fmt.Errorf("found function with reserved ID=0")
+ }
+ if functions[f.ID] != nil {
+ return fmt.Errorf("multiple functions with same id: %d", f.ID)
+ }
+ functions[f.ID] = f
+ }
+ locations := make(map[uint64]*Location, len(p.Location))
+ for _, l := range p.Location {
+ if l == nil {
+ return fmt.Errorf("profile has nil location")
+ }
+ if l.ID == 0 {
+ return fmt.Errorf("found location with reserved id=0")
+ }
+ if locations[l.ID] != nil {
+ return fmt.Errorf("multiple locations with same id: %d", l.ID)
+ }
+ locations[l.ID] = l
+ if m := l.Mapping; m != nil {
+ if m.ID == 0 || mappings[m.ID] != m {
+ return fmt.Errorf("inconsistent mapping %p: %d", m, m.ID)
+ }
+ }
+ for _, ln := range l.Line {
+ f := ln.Function
+ if f == nil {
+ return fmt.Errorf("location id: %d has a line with nil function", l.ID)
+ }
+ if f.ID == 0 || functions[f.ID] != f {
+ return fmt.Errorf("inconsistent function %p: %d", f, f.ID)
+ }
+ }
+ }
+ return nil
+}
+
+// Aggregate merges the locations in the profile into equivalence
+// classes preserving the request attributes. It also updates the
+// samples to point to the merged locations.
+func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, columnnumber, address bool) error {
+ for _, m := range p.Mapping {
+ m.HasInlineFrames = m.HasInlineFrames && inlineFrame
+ m.HasFunctions = m.HasFunctions && function
+ m.HasFilenames = m.HasFilenames && filename
+ m.HasLineNumbers = m.HasLineNumbers && linenumber
+ }
+
+ // Aggregate functions
+ if !function || !filename {
+ for _, f := range p.Function {
+ if !function {
+ f.Name = ""
+ f.SystemName = ""
+ }
+ if !filename {
+ f.Filename = ""
+ }
+ }
+ }
+
+ // Aggregate locations
+ if !inlineFrame || !address || !linenumber || !columnnumber {
+ for _, l := range p.Location {
+ if !inlineFrame && len(l.Line) > 1 {
+ l.Line = l.Line[len(l.Line)-1:]
+ }
+ if !linenumber {
+ for i := range l.Line {
+ l.Line[i].Line = 0
+ l.Line[i].Column = 0
+ }
+ }
+ if !columnnumber {
+ for i := range l.Line {
+ l.Line[i].Column = 0
+ }
+ }
+ if !address {
+ l.Address = 0
+ }
+ }
+ }
+
+ return p.CheckValid()
+}
+
+// NumLabelUnits returns a map of numeric label keys to the units
+// associated with those keys and a map of those keys to any units
+// that were encountered but not used.
+// The unit for a given key is the first encountered unit for that key. If
+// multiple units are encountered for values paired with a particular key,
+// the first unit encountered is used and all other units are returned in
+// sorted order in the map of ignored units.
+// If no units are encountered for a particular key, the unit is then inferred
+// based on the key.
+func (p *Profile) NumLabelUnits() (map[string]string, map[string][]string) {
+ numLabelUnits := map[string]string{}
+ ignoredUnits := map[string]map[string]bool{}
+ encounteredKeys := map[string]bool{}
+
+ // Determine units based on numeric tags for each sample.
+ for _, s := range p.Sample {
+ for k := range s.NumLabel {
+ encounteredKeys[k] = true
+ for _, unit := range s.NumUnit[k] {
+ if unit == "" {
+ continue
+ }
+ if wantUnit, ok := numLabelUnits[k]; !ok {
+ numLabelUnits[k] = unit
+ } else if wantUnit != unit {
+ if v, ok := ignoredUnits[k]; ok {
+ v[unit] = true
+ } else {
+ ignoredUnits[k] = map[string]bool{unit: true}
+ }
+ }
+ }
+ }
+ }
+ // Infer units for keys without any units associated with
+ // numeric tag values.
+ for key := range encounteredKeys {
+ unit := numLabelUnits[key]
+ if unit == "" {
+ switch key {
+ case "alignment", "request":
+ numLabelUnits[key] = "bytes"
+ default:
+ numLabelUnits[key] = key
+ }
+ }
+ }
+
+ // Copy ignored units into more readable format
+ unitsIgnored := make(map[string][]string, len(ignoredUnits))
+ for key, values := range ignoredUnits {
+ units := make([]string, len(values))
+ i := 0
+ for unit := range values {
+ units[i] = unit
+ i++
+ }
+ sort.Strings(units)
+ unitsIgnored[key] = units
+ }
+
+ return numLabelUnits, unitsIgnored
+}
+
+// String dumps a text representation of a profile. Intended mainly
+// for debugging purposes.
+func (p *Profile) String() string {
+ ss := make([]string, 0, len(p.Comments)+len(p.Sample)+len(p.Mapping)+len(p.Location))
+ for _, c := range p.Comments {
+ ss = append(ss, "Comment: "+c)
+ }
+ if url := p.DocURL; url != "" {
+ ss = append(ss, fmt.Sprintf("Doc: %s", url))
+ }
+ if pt := p.PeriodType; pt != nil {
+ ss = append(ss, fmt.Sprintf("PeriodType: %s %s", pt.Type, pt.Unit))
+ }
+ ss = append(ss, fmt.Sprintf("Period: %d", p.Period))
+ if p.TimeNanos != 0 {
+ ss = append(ss, fmt.Sprintf("Time: %v", time.Unix(0, p.TimeNanos)))
+ }
+ if p.DurationNanos != 0 {
+ ss = append(ss, fmt.Sprintf("Duration: %.4v", time.Duration(p.DurationNanos)))
+ }
+
+ ss = append(ss, "Samples:")
+ var sh1 string
+ for _, s := range p.SampleType {
+ dflt := ""
+ if s.Type == p.DefaultSampleType {
+ dflt = "[dflt]"
+ }
+ sh1 = sh1 + fmt.Sprintf("%s/%s%s ", s.Type, s.Unit, dflt)
+ }
+ ss = append(ss, strings.TrimSpace(sh1))
+ for _, s := range p.Sample {
+ ss = append(ss, s.string())
+ }
+
+ ss = append(ss, "Locations")
+ for _, l := range p.Location {
+ ss = append(ss, l.string())
+ }
+
+ ss = append(ss, "Mappings")
+ for _, m := range p.Mapping {
+ ss = append(ss, m.string())
+ }
+
+ return strings.Join(ss, "\n") + "\n"
+}
+
+// string dumps a text representation of a mapping. Intended mainly
+// for debugging purposes.
+func (m *Mapping) string() string {
+ bits := ""
+ if m.HasFunctions {
+ bits = bits + "[FN]"
+ }
+ if m.HasFilenames {
+ bits = bits + "[FL]"
+ }
+ if m.HasLineNumbers {
+ bits = bits + "[LN]"
+ }
+ if m.HasInlineFrames {
+ bits = bits + "[IN]"
+ }
+ return fmt.Sprintf("%d: %#x/%#x/%#x %s %s %s",
+ m.ID,
+ m.Start, m.Limit, m.Offset,
+ m.File,
+ m.BuildID,
+ bits)
+}
+
+// string dumps a text representation of a location. Intended mainly
+// for debugging purposes.
+func (l *Location) string() string {
+ ss := []string{}
+ locStr := fmt.Sprintf("%6d: %#x ", l.ID, l.Address)
+ if m := l.Mapping; m != nil {
+ locStr = locStr + fmt.Sprintf("M=%d ", m.ID)
+ }
+ if l.IsFolded {
+ locStr = locStr + "[F] "
+ }
+ if len(l.Line) == 0 {
+ ss = append(ss, locStr)
+ }
+ for li := range l.Line {
+ lnStr := "??"
+ if fn := l.Line[li].Function; fn != nil {
+ lnStr = fmt.Sprintf("%s %s:%d:%d s=%d",
+ fn.Name,
+ fn.Filename,
+ l.Line[li].Line,
+ l.Line[li].Column,
+ fn.StartLine)
+ if fn.Name != fn.SystemName {
+ lnStr = lnStr + "(" + fn.SystemName + ")"
+ }
+ }
+ ss = append(ss, locStr+lnStr)
+ // Do not print location details past the first line
+ locStr = " "
+ }
+ return strings.Join(ss, "\n")
+}
+
+// string dumps a text representation of a sample. Intended mainly
+// for debugging purposes.
+func (s *Sample) string() string {
+ ss := []string{}
+ var sv string
+ for _, v := range s.Value {
+ sv = fmt.Sprintf("%s %10d", sv, v)
+ }
+ sv = sv + ": "
+ for _, l := range s.Location {
+ sv = sv + fmt.Sprintf("%d ", l.ID)
+ }
+ ss = append(ss, sv)
+ const labelHeader = " "
+ if len(s.Label) > 0 {
+ ss = append(ss, labelHeader+labelsToString(s.Label))
+ }
+ if len(s.NumLabel) > 0 {
+ ss = append(ss, labelHeader+numLabelsToString(s.NumLabel, s.NumUnit))
+ }
+ return strings.Join(ss, "\n")
+}
+
+// labelsToString returns a string representation of a
+// map representing labels.
+func labelsToString(labels map[string][]string) string {
+ ls := []string{}
+ for k, v := range labels {
+ ls = append(ls, fmt.Sprintf("%s:%v", k, v))
+ }
+ sort.Strings(ls)
+ return strings.Join(ls, " ")
+}
+
+// numLabelsToString returns a string representation of a map
+// representing numeric labels.
+func numLabelsToString(numLabels map[string][]int64, numUnits map[string][]string) string {
+ ls := []string{}
+ for k, v := range numLabels {
+ units := numUnits[k]
+ var labelString string
+ if len(units) == len(v) {
+ values := make([]string, len(v))
+ for i, vv := range v {
+ values[i] = fmt.Sprintf("%d %s", vv, units[i])
+ }
+ labelString = fmt.Sprintf("%s:%v", k, values)
+ } else {
+ labelString = fmt.Sprintf("%s:%v", k, v)
+ }
+ ls = append(ls, labelString)
+ }
+ sort.Strings(ls)
+ return strings.Join(ls, " ")
+}
+
+// SetLabel sets the specified key to the specified value for all samples in the
+// profile.
+func (p *Profile) SetLabel(key string, value []string) {
+ for _, sample := range p.Sample {
+ if sample.Label == nil {
+ sample.Label = map[string][]string{key: value}
+ } else {
+ sample.Label[key] = value
+ }
+ }
+}
+
+// RemoveLabel removes all labels associated with the specified key for all
+// samples in the profile.
+func (p *Profile) RemoveLabel(key string) {
+ for _, sample := range p.Sample {
+ delete(sample.Label, key)
+ }
+}
+
+// HasLabel returns true if a sample has a label with the indicated key and
+// value.
+func (s *Sample) HasLabel(key, value string) bool {
+ for _, v := range s.Label[key] {
+ if v == value {
+ return true
+ }
+ }
+ return false
+}
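+// Usage sketch (illustrative; p is a hypothetical profile with at least one
+// sample):
+//
+//	p.SetLabel("source", []string{"e2e"})
+//	tagged := p.Sample[0].HasLabel("source", "e2e") // true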
+
+// SetNumLabel sets the specified key to the specified value for all samples in the
+// profile. "unit" is a slice that describes the units that each corresponding member
+// of "values" is measured in (e.g. bytes or seconds). If there is no relevant
+// unit for a given value, that member of "unit" should be the empty string.
+// "unit" must either have the same length as "value", or be nil.
+func (p *Profile) SetNumLabel(key string, value []int64, unit []string) {
+ for _, sample := range p.Sample {
+ if sample.NumLabel == nil {
+ sample.NumLabel = map[string][]int64{key: value}
+ } else {
+ sample.NumLabel[key] = value
+ }
+ if sample.NumUnit == nil {
+ sample.NumUnit = map[string][]string{key: unit}
+ } else {
+ sample.NumUnit[key] = unit
+ }
+ }
+}
+
+// RemoveNumLabel removes all numerical labels associated with the specified key for all
+// samples in the profile.
+func (p *Profile) RemoveNumLabel(key string) {
+ for _, sample := range p.Sample {
+ delete(sample.NumLabel, key)
+ delete(sample.NumUnit, key)
+ }
+}
+
+// DiffBaseSample returns true if a sample belongs to the diff base and false
+// otherwise.
+func (s *Sample) DiffBaseSample() bool {
+ return s.HasLabel("pprof::base", "true")
+}
+
+// Scale multiplies all sample values in a profile by a constant and keeps
+// only samples that have at least one non-zero value.
+func (p *Profile) Scale(ratio float64) {
+ if ratio == 1 {
+ return
+ }
+ ratios := make([]float64, len(p.SampleType))
+ for i := range p.SampleType {
+ ratios[i] = ratio
+ }
+ p.ScaleN(ratios)
+}
+
+// ScaleN multiplies the values in each sample by their per-type ratios and
+// keeps only samples that have at least one non-zero value.
+func (p *Profile) ScaleN(ratios []float64) error {
+ if len(p.SampleType) != len(ratios) {
+ return fmt.Errorf("mismatched scale ratios, got %d, want %d", len(ratios), len(p.SampleType))
+ }
+ allOnes := true
+ for _, r := range ratios {
+ if r != 1 {
+ allOnes = false
+ break
+ }
+ }
+ if allOnes {
+ return nil
+ }
+ fillIdx := 0
+ for _, s := range p.Sample {
+ keepSample := false
+ for i, v := range s.Value {
+ if ratios[i] != 1 {
+ val := int64(math.Round(float64(v) * ratios[i]))
+ s.Value[i] = val
+ keepSample = keepSample || val != 0
+ }
+ }
+ if keepSample {
+ p.Sample[fillIdx] = s
+ fillIdx++
+ }
+ }
+ p.Sample = p.Sample[:fillIdx]
+ return nil
+}
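+// Usage sketch (illustrative; p is a hypothetical profile with two sample
+// types): halve every value, or scale each type independently; samples whose
+// values all become zero are dropped:
+//
+//	p.Scale(0.5)                    // same ratio for every type
+//	_ = p.ScaleN([]float64{1, 0.5}) // per-type ratios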
+
+// HasFunctions determines if all locations in this profile have
+// symbolized function information.
+func (p *Profile) HasFunctions() bool {
+ for _, l := range p.Location {
+ if l.Mapping != nil && !l.Mapping.HasFunctions {
+ return false
+ }
+ }
+ return true
+}
+
+// HasFileLines determines if all locations in this profile have
+// symbolized file and line number information.
+func (p *Profile) HasFileLines() bool {
+ for _, l := range p.Location {
+ if l.Mapping != nil && (!l.Mapping.HasFilenames || !l.Mapping.HasLineNumbers) {
+ return false
+ }
+ }
+ return true
+}
+
+// Unsymbolizable returns true if a mapping points to a binary for which
+// locations can't be symbolized in principle, at least now. Examples are
+// "[vdso]", "[vsyscall]" and some others, see the code.
+func (m *Mapping) Unsymbolizable() bool {
+ name := filepath.Base(m.File)
+ return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/") || m.File == "//anon"
+}
+
+// Copy makes a fully independent copy of a profile.
+func (p *Profile) Copy() *Profile {
+ pp := &Profile{}
+ if err := unmarshal(serialize(p), pp); err != nil {
+ panic(err)
+ }
+ if err := pp.postDecode(); err != nil {
+ panic(err)
+ }
+
+ return pp
+}
diff --git a/vendor/github.com/google/pprof/profile/proto.go b/vendor/github.com/google/pprof/profile/proto.go
new file mode 100644
index 000000000..a15696ba1
--- /dev/null
+++ b/vendor/github.com/google/pprof/profile/proto.go
@@ -0,0 +1,367 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file is a simple protocol buffer encoder and decoder.
+// The format is described at
+// https://developers.google.com/protocol-buffers/docs/encoding
+//
+// A protocol message must implement the message interface:
+// decoder() []decoder
+// encode(*buffer)
+//
+// The decode method returns a slice indexed by field number that gives the
+// function to decode that field.
+// The encode method encodes its receiver into the given buffer.
+//
+// The two methods are simple enough to be implemented by hand rather than
+// by using a protocol compiler.
+//
+// See profile.go for examples of messages implementing this interface.
+//
+// There is no support for groups, message sets, or "has" bits.
+
+package profile
+
+import (
+ "errors"
+ "fmt"
+)
+
+type buffer struct {
+ field int // field tag
+ typ int // proto wire type code for field
+ u64 uint64
+ data []byte
+ tmp [16]byte
+ tmpLines []Line // temporary storage used while decoding "repeated Line".
+}
+
+type decoder func(*buffer, message) error
+
+type message interface {
+ decoder() []decoder
+ encode(*buffer)
+}
+
+func marshal(m message) []byte {
+ var b buffer
+ m.encode(&b)
+ return b.data
+}
+
+func encodeVarint(b *buffer, x uint64) {
+ for x >= 128 {
+ b.data = append(b.data, byte(x)|0x80)
+ x >>= 7
+ }
+ b.data = append(b.data, byte(x))
+}
+
+func encodeLength(b *buffer, tag int, len int) {
+ encodeVarint(b, uint64(tag)<<3|2)
+ encodeVarint(b, uint64(len))
+}
+
+func encodeUint64(b *buffer, tag int, x uint64) {
+ // append varint to b.data
+ encodeVarint(b, uint64(tag)<<3)
+ encodeVarint(b, x)
+}
+
+func encodeUint64s(b *buffer, tag int, x []uint64) {
+ if len(x) > 2 {
+ // Use packed encoding
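+ // Append the payload varints first, then the tag/length header,
+ // then rotate the header in front of the payload using b.tmp as
+ // scratch space (the header fits within tmp's 16 bytes).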
+ n1 := len(b.data)
+ for _, u := range x {
+ encodeVarint(b, u)
+ }
+ n2 := len(b.data)
+ encodeLength(b, tag, n2-n1)
+ n3 := len(b.data)
+ copy(b.tmp[:], b.data[n2:n3])
+ copy(b.data[n1+(n3-n2):], b.data[n1:n2])
+ copy(b.data[n1:], b.tmp[:n3-n2])
+ return
+ }
+ for _, u := range x {
+ encodeUint64(b, tag, u)
+ }
+}
+
+func encodeUint64Opt(b *buffer, tag int, x uint64) {
+ if x == 0 {
+ return
+ }
+ encodeUint64(b, tag, x)
+}
+
+func encodeInt64(b *buffer, tag int, x int64) {
+ u := uint64(x)
+ encodeUint64(b, tag, u)
+}
+
+func encodeInt64s(b *buffer, tag int, x []int64) {
+ if len(x) > 2 {
+ // Use packed encoding
+ n1 := len(b.data)
+ for _, u := range x {
+ encodeVarint(b, uint64(u))
+ }
+ n2 := len(b.data)
+ encodeLength(b, tag, n2-n1)
+ n3 := len(b.data)
+ copy(b.tmp[:], b.data[n2:n3])
+ copy(b.data[n1+(n3-n2):], b.data[n1:n2])
+ copy(b.data[n1:], b.tmp[:n3-n2])
+ return
+ }
+ for _, u := range x {
+ encodeInt64(b, tag, u)
+ }
+}
+
+func encodeInt64Opt(b *buffer, tag int, x int64) {
+ if x == 0 {
+ return
+ }
+ encodeInt64(b, tag, x)
+}
+
+func encodeString(b *buffer, tag int, x string) {
+ encodeLength(b, tag, len(x))
+ b.data = append(b.data, x...)
+}
+
+func encodeStrings(b *buffer, tag int, x []string) {
+ for _, s := range x {
+ encodeString(b, tag, s)
+ }
+}
+
+func encodeBool(b *buffer, tag int, x bool) {
+ if x {
+ encodeUint64(b, tag, 1)
+ } else {
+ encodeUint64(b, tag, 0)
+ }
+}
+
+func encodeBoolOpt(b *buffer, tag int, x bool) {
+ if x {
+ encodeBool(b, tag, x)
+ }
+}
+
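+// encodeMessage appends m as a length-delimited field: the message body
+// is encoded first, then the tag/length header is rotated in front of it
+// using b.tmp as scratch space.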
+func encodeMessage(b *buffer, tag int, m message) {
+ n1 := len(b.data)
+ m.encode(b)
+ n2 := len(b.data)
+ encodeLength(b, tag, n2-n1)
+ n3 := len(b.data)
+ copy(b.tmp[:], b.data[n2:n3])
+ copy(b.data[n1+(n3-n2):], b.data[n1:n2])
+ copy(b.data[n1:], b.tmp[:n3-n2])
+}
+
+func unmarshal(data []byte, m message) (err error) {
+ b := buffer{data: data, typ: 2}
+ return decodeMessage(&b, m)
+}
+
+func le64(p []byte) uint64 {
+ return uint64(p[0]) | uint64(p[1])<<8 | uint64(p[2])<<16 | uint64(p[3])<<24 | uint64(p[4])<<32 | uint64(p[5])<<40 | uint64(p[6])<<48 | uint64(p[7])<<56
+}
+
+func le32(p []byte) uint32 {
+ return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24
+}
+
+func decodeVarint(data []byte) (uint64, []byte, error) {
+ var u uint64
+ for i := 0; ; i++ {
+ if i >= 10 || i >= len(data) {
+ return 0, nil, errors.New("bad varint")
+ }
+ u |= uint64(data[i]&0x7F) << uint(7*i)
+ if data[i]&0x80 == 0 {
+ return u, data[i+1:], nil
+ }
+ }
+}
+
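+// decodeField decodes a single tagged field from data into b. Wire type
+// 0 is a varint, 1 a fixed 64-bit value, 2 a length-delimited byte
+// string, and 5 a fixed 32-bit value; any other wire type is an error.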
+func decodeField(b *buffer, data []byte) ([]byte, error) {
+ x, data, err := decodeVarint(data)
+ if err != nil {
+ return nil, err
+ }
+ b.field = int(x >> 3)
+ b.typ = int(x & 7)
+ b.data = nil
+ b.u64 = 0
+ switch b.typ {
+ case 0:
+ b.u64, data, err = decodeVarint(data)
+ if err != nil {
+ return nil, err
+ }
+ case 1:
+ if len(data) < 8 {
+ return nil, errors.New("not enough data")
+ }
+ b.u64 = le64(data[:8])
+ data = data[8:]
+ case 2:
+ var n uint64
+ n, data, err = decodeVarint(data)
+ if err != nil {
+ return nil, err
+ }
+ if n > uint64(len(data)) {
+ return nil, errors.New("too much data")
+ }
+ b.data = data[:n]
+ data = data[n:]
+ case 5:
+ if len(data) < 4 {
+ return nil, errors.New("not enough data")
+ }
+ b.u64 = uint64(le32(data[:4]))
+ data = data[4:]
+ default:
+ return nil, fmt.Errorf("unknown wire type: %d", b.typ)
+ }
+
+ return data, nil
+}
+
+func checkType(b *buffer, typ int) error {
+ if b.typ != typ {
+ return errors.New("type mismatch")
+ }
+ return nil
+}
+
+func decodeMessage(b *buffer, m message) error {
+ if err := checkType(b, 2); err != nil {
+ return err
+ }
+ dec := m.decoder()
+ data := b.data
+ for len(data) > 0 {
+ // pull varint field# + type
+ var err error
+ data, err = decodeField(b, data)
+ if err != nil {
+ return err
+ }
+ if b.field >= len(dec) || dec[b.field] == nil {
+ continue
+ }
+ if err := dec[b.field](b, m); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func decodeInt64(b *buffer, x *int64) error {
+ if err := checkType(b, 0); err != nil {
+ return err
+ }
+ *x = int64(b.u64)
+ return nil
+}
+
+func decodeInt64s(b *buffer, x *[]int64) error {
+ if b.typ == 2 {
+ // Packed encoding
+ data := b.data
+ for len(data) > 0 {
+ var u uint64
+ var err error
+
+ if u, data, err = decodeVarint(data); err != nil {
+ return err
+ }
+ *x = append(*x, int64(u))
+ }
+ return nil
+ }
+ var i int64
+ if err := decodeInt64(b, &i); err != nil {
+ return err
+ }
+ *x = append(*x, i)
+ return nil
+}
+
+func decodeUint64(b *buffer, x *uint64) error {
+ if err := checkType(b, 0); err != nil {
+ return err
+ }
+ *x = b.u64
+ return nil
+}
+
+func decodeUint64s(b *buffer, x *[]uint64) error {
+ if b.typ == 2 {
+ data := b.data
+ // Packed encoding
+ for len(data) > 0 {
+ var u uint64
+ var err error
+
+ if u, data, err = decodeVarint(data); err != nil {
+ return err
+ }
+ *x = append(*x, u)
+ }
+ return nil
+ }
+ var u uint64
+ if err := decodeUint64(b, &u); err != nil {
+ return err
+ }
+ *x = append(*x, u)
+ return nil
+}
+
+func decodeString(b *buffer, x *string) error {
+ if err := checkType(b, 2); err != nil {
+ return err
+ }
+ *x = string(b.data)
+ return nil
+}
+
+func decodeStrings(b *buffer, x *[]string) error {
+ var s string
+ if err := decodeString(b, &s); err != nil {
+ return err
+ }
+ *x = append(*x, s)
+ return nil
+}
+
+func decodeBool(b *buffer, x *bool) error {
+ if err := checkType(b, 0); err != nil {
+ return err
+ }
+ if int64(b.u64) == 0 {
+ *x = false
+ } else {
+ *x = true
+ }
+ return nil
+}
diff --git a/vendor/github.com/google/pprof/profile/prune.go b/vendor/github.com/google/pprof/profile/prune.go
new file mode 100644
index 000000000..b2f9fd546
--- /dev/null
+++ b/vendor/github.com/google/pprof/profile/prune.go
@@ -0,0 +1,194 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Implements methods to remove frames from profiles.
+
+package profile
+
+import (
+ "fmt"
+ "regexp"
+ "strings"
+)
+
+var (
+ reservedNames = []string{"(anonymous namespace)", "operator()"}
+ bracketRx = func() *regexp.Regexp {
+ var quotedNames []string
+ for _, name := range append(reservedNames, "(") {
+ quotedNames = append(quotedNames, regexp.QuoteMeta(name))
+ }
+ return regexp.MustCompile(strings.Join(quotedNames, "|"))
+ }()
+)
+
+// simplifyFunc does some primitive simplification of function names.
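+// For example, ".foo::bar(int, char const*)" becomes "foo::bar".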
+func simplifyFunc(f string) string {
+ // Account for leading '.' on the PPC ELF v1 ABI.
+ funcName := strings.TrimPrefix(f, ".")
+ // Account for unsimplified names -- try to remove the argument list by trimming
+ // starting from the first '(', but skipping reserved names that have '('.
+ for _, ind := range bracketRx.FindAllStringSubmatchIndex(funcName, -1) {
+ foundReserved := false
+ for _, res := range reservedNames {
+ if funcName[ind[0]:ind[1]] == res {
+ foundReserved = true
+ break
+ }
+ }
+ if !foundReserved {
+ funcName = funcName[:ind[0]]
+ break
+ }
+ }
+ return funcName
+}
+
+// Prune removes all nodes beneath a node matching dropRx, and not
+// matching keepRx. If the root node of a Sample matches, the sample
+// will have an empty stack.
+func (p *Profile) Prune(dropRx, keepRx *regexp.Regexp) {
+ prune := make(map[uint64]bool)
+ pruneBeneath := make(map[uint64]bool)
+
+ // simplifyFunc can be expensive, so cache results.
+ // Note that the same function name can be encountered many times due
+ // to different lines and addresses in the same function.
+ pruneCache := map[string]bool{} // Map from function to whether or not to prune
+ pruneFromHere := func(s string) bool {
+ if r, ok := pruneCache[s]; ok {
+ return r
+ }
+ funcName := simplifyFunc(s)
+ if dropRx.MatchString(funcName) {
+ if keepRx == nil || !keepRx.MatchString(funcName) {
+ pruneCache[s] = true
+ return true
+ }
+ }
+ pruneCache[s] = false
+ return false
+ }
+
+ for _, loc := range p.Location {
+ var i int
+ for i = len(loc.Line) - 1; i >= 0; i-- {
+ if fn := loc.Line[i].Function; fn != nil && fn.Name != "" {
+ if pruneFromHere(fn.Name) {
+ break
+ }
+ }
+ }
+
+ if i >= 0 {
+ // Found matching entry to prune.
+ pruneBeneath[loc.ID] = true
+
+ // Remove the matching location.
+ if i == len(loc.Line)-1 {
+ // Matched the top entry: prune the whole location.
+ prune[loc.ID] = true
+ } else {
+ loc.Line = loc.Line[i+1:]
+ }
+ }
+ }
+
+ // Prune locs from each Sample
+ for _, sample := range p.Sample {
+ // Scan from the root to the leaves to find the prune location.
+ // Do not prune frames before the first user frame, to avoid
+ // pruning everything.
+ foundUser := false
+ for i := len(sample.Location) - 1; i >= 0; i-- {
+ id := sample.Location[i].ID
+ if !prune[id] && !pruneBeneath[id] {
+ foundUser = true
+ continue
+ }
+ if !foundUser {
+ continue
+ }
+ if prune[id] {
+ sample.Location = sample.Location[i+1:]
+ break
+ }
+ if pruneBeneath[id] {
+ sample.Location = sample.Location[i:]
+ break
+ }
+ }
+ }
+}
+
+// RemoveUninteresting prunes and elides profiles using built-in
+// tables of uninteresting function names.
+func (p *Profile) RemoveUninteresting() error {
+ var keep, drop *regexp.Regexp
+ var err error
+
+ if p.DropFrames != "" {
+ if drop, err = regexp.Compile("^(" + p.DropFrames + ")$"); err != nil {
+ return fmt.Errorf("failed to compile regexp %s: %v", p.DropFrames, err)
+ }
+ if p.KeepFrames != "" {
+ if keep, err = regexp.Compile("^(" + p.KeepFrames + ")$"); err != nil {
+ return fmt.Errorf("failed to compile regexp %s: %v", p.KeepFrames, err)
+ }
+ }
+ p.Prune(drop, keep)
+ }
+ return nil
+}
+
+// PruneFrom removes all nodes beneath the lowest node matching dropRx, not including itself.
+//
+// Please see the example below to understand this method as well as
+// the difference from the Prune method.
+//
+// A sample contains Location of [A,B,C,B,D] where D is the top frame and there's no inlining.
+//
+// PruneFrom(A) returns [A,B,C,B,D] because there's no node beneath A.
+// Prune(A, nil) returns [B,C,B,D] by removing A itself.
+//
+// PruneFrom(B) returns [B,C,B,D] by removing all nodes beneath the first B when scanning from the bottom.
+// Prune(B, nil) returns [D] because a matching node is found by scanning from the root.
+func (p *Profile) PruneFrom(dropRx *regexp.Regexp) {
+ pruneBeneath := make(map[uint64]bool)
+
+ for _, loc := range p.Location {
+ for i := 0; i < len(loc.Line); i++ {
+ if fn := loc.Line[i].Function; fn != nil && fn.Name != "" {
+ funcName := simplifyFunc(fn.Name)
+ if dropRx.MatchString(funcName) {
+ // Found matching entry to prune.
+ pruneBeneath[loc.ID] = true
+ loc.Line = loc.Line[i:]
+ break
+ }
+ }
+ }
+ }
+
+ // Prune locs from each Sample
+ for _, sample := range p.Sample {
+ // Scan from the bottom leaf to the root to find the prune location.
+ for i, loc := range sample.Location {
+ if pruneBeneath[loc.ID] {
+ sample.Location = sample.Location[i:]
+ break
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/inconshreveable/mousetrap/LICENSE b/vendor/github.com/inconshreveable/mousetrap/LICENSE
new file mode 100644
index 000000000..5f920e973
--- /dev/null
+++ b/vendor/github.com/inconshreveable/mousetrap/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2022 Alan Shreve (@inconshreveable)
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/inconshreveable/mousetrap/README.md b/vendor/github.com/inconshreveable/mousetrap/README.md
new file mode 100644
index 000000000..7a950d177
--- /dev/null
+++ b/vendor/github.com/inconshreveable/mousetrap/README.md
@@ -0,0 +1,23 @@
+# mousetrap
+
+mousetrap is a tiny library that answers a single question.
+
+On a Windows machine, was the process invoked by someone double-clicking on
+the executable file while browsing in Explorer?
+
+### Motivation
+
+Windows developers unfamiliar with command line tools will often "double-click"
+the executable for a tool. Because most CLI tools print the help and then exit
+when invoked without arguments, this is often very frustrating for those users.
+
+mousetrap provides a way to detect these invocations so that you can provide
+more helpful behavior and instructions on how to run the CLI tool. To see what
+this looks like, both from an organizational and a technical perspective, see
+https://inconshreveable.com/09-09-2014/sweat-the-small-stuff/
+
+### The interface
+
+The library exposes a single interface:
+
+    func StartedByExplorer() bool
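+
+### Example usage
+
+A minimal sketch of the typical pattern (the message text and the sleep
+are just illustrations):
+
+    package main
+
+    import (
+        "fmt"
+        "os"
+        "time"
+
+        "github.com/inconshreveable/mousetrap"
+    )
+
+    func main() {
+        if mousetrap.StartedByExplorer() {
+            fmt.Println("This is a command-line tool; please run it from a terminal.")
+            // Give the user time to read before the console window closes.
+            time.Sleep(5 * time.Second)
+            os.Exit(1)
+        }
+        // ... normal CLI behavior ...
+    }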
diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_others.go b/vendor/github.com/inconshreveable/mousetrap/trap_others.go
new file mode 100644
index 000000000..06a91f086
--- /dev/null
+++ b/vendor/github.com/inconshreveable/mousetrap/trap_others.go
@@ -0,0 +1,16 @@
+//go:build !windows
+// +build !windows
+
+package mousetrap
+
+// StartedByExplorer returns true if the program was invoked by the user
+// double-clicking on the executable from explorer.exe
+//
+// It is conservative and returns false if any of the internal calls fail.
+// It does not guarantee that the program was run from a terminal. It can only tell you
+// whether it was launched from explorer.exe
+//
+// On non-Windows platforms, it always returns false.
+func StartedByExplorer() bool {
+ return false
+}
diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_windows.go b/vendor/github.com/inconshreveable/mousetrap/trap_windows.go
new file mode 100644
index 000000000..0c5688021
--- /dev/null
+++ b/vendor/github.com/inconshreveable/mousetrap/trap_windows.go
@@ -0,0 +1,42 @@
+package mousetrap
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+func getProcessEntry(pid int) (*syscall.ProcessEntry32, error) {
+ snapshot, err := syscall.CreateToolhelp32Snapshot(syscall.TH32CS_SNAPPROCESS, 0)
+ if err != nil {
+ return nil, err
+ }
+ defer syscall.CloseHandle(snapshot)
+ var procEntry syscall.ProcessEntry32
+ procEntry.Size = uint32(unsafe.Sizeof(procEntry))
+ if err = syscall.Process32First(snapshot, &procEntry); err != nil {
+ return nil, err
+ }
+ for {
+ if procEntry.ProcessID == uint32(pid) {
+ return &procEntry, nil
+ }
+ err = syscall.Process32Next(snapshot, &procEntry)
+ if err != nil {
+ return nil, err
+ }
+ }
+}
+
+// StartedByExplorer returns true if the program was invoked by the user double-clicking
+// on the executable from explorer.exe
+//
+// It is conservative and returns false if any of the internal calls fail.
+// It does not guarantee that the program was run from a terminal. It can only tell you
+// whether it was launched from explorer.exe
+func StartedByExplorer() bool {
+ pe, err := getProcessEntry(syscall.Getppid())
+ if err != nil {
+ return false
+ }
+ return "explorer.exe" == syscall.UTF16ToString(pe.ExeFile[:])
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/.gitignore b/vendor/github.com/onsi/ginkgo/v2/.gitignore
new file mode 100644
index 000000000..18793c248
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/.gitignore
@@ -0,0 +1,7 @@
+.DS_Store
+TODO
+tmp/**/*
+*.coverprofile
+.vscode
+.idea/
+*.log
\ No newline at end of file
diff --git a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md
new file mode 100644
index 000000000..3011efb57
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md
@@ -0,0 +1,1075 @@
+## 2.21.0
+
+### Features
+- add support for GINKGO_TIME_FORMAT [a69eb39]
+- add GINKGO_NO_COLOR to disable colors via environment variables [bcab9c8]
+
+### Fixes
+- increase threshold in timeline matcher [e548367]
+- Fix the document by replacing `SpecsThatWillBeRun` with `SpecsThatWillRun` [c2c4d3c]
+
+### Maintenance
+- bump various dependencies [7e65a00]
+
+## 2.20.2
+
+Require Go 1.22+
+
+### Maintenance
+- bump go to v1.22 [a671816]
+
+## 2.20.1
+
+### Fixes
+- make BeSpecEvent duration matcher more forgiving [d6f9640]
+
+## 2.20.0
+
+### Features
+- Add buildvcs flag [be5ab95]
+
+### Maintenance
+- Add update-deps to makefile [d303d14]
+- bump all dependencies [7a50221]
+
+## 2.19.1
+
+### Fixes
+- update supported platforms for race conditions [63c8c30]
+- [build] Allow custom name for binaries. [ff41e27]
+
+### Maintenance
+- bump gomega [76f4e0c]
+- Bump rexml from 3.2.6 to 3.2.8 in /docs (#1417) [b69c00d]
+- Bump golang.org/x/sys from 0.20.0 to 0.21.0 (#1425) [f097741]
+
+## 2.19.0
+
+### Features
+
+[Label Sets](https://onsi.github.io/ginkgo/#label-sets) allow for more expressive and flexible label filtering.
+
+## 2.18.0
+
+### Features
+- Add --silence-skips and --force-newlines [f010b65]
+- fail when no tests were run and --fail-on-empty was set [d80eebe]
+
+### Fixes
+- Fix table entry context edge case [42013d6]
+
+### Maintenance
+- Bump golang.org/x/tools from 0.20.0 to 0.21.0 (#1406) [fcf1fd7]
+- Bump github.com/onsi/gomega from 1.33.0 to 1.33.1 (#1399) [8bb14fd]
+- Bump golang.org/x/net from 0.24.0 to 0.25.0 (#1407) [04bfad7]
+
+## 2.17.3
+
+### Fixes
+`ginkgo watch` now ignores hidden files [bde6e00]
+
+## 2.17.2
+
+### Fixes
+- fix: close files [32259c8]
+- fix github output log level for skipped specs [780e7a3]
+
+### Maintenance
+- Bump github.com/google/pprof [d91fe4e]
+- Bump github.com/go-task/slim-sprig to v3 [8cb662e]
+- Bump golang.org/x/net in /integration/_fixtures/version_mismatch_fixture (#1391) [3134422]
+- Bump github-pages from 230 to 231 in /docs (#1384) [eca81b4]
+- Bump golang.org/x/tools from 0.19.0 to 0.20.0 (#1383) [760def8]
+- Bump golang.org/x/net from 0.23.0 to 0.24.0 (#1381) [4ce33f4]
+- Fix test for gomega version bump [f2fcd97]
+- Bump github.com/onsi/gomega from 1.30.0 to 1.33.0 (#1390) [fd622d2]
+- Bump golang.org/x/tools from 0.17.0 to 0.19.0 (#1368) [5474a26]
+- Bump github-pages from 229 to 230 in /docs (#1359) [e6d1170]
+- Bump google.golang.org/protobuf from 1.28.0 to 1.33.0 (#1374) [7f447b2]
+- Bump golang.org/x/net from 0.20.0 to 0.23.0 (#1380) [f15239a]
+
+## 2.17.1
+
+### Fixes
+- If the user sets --seed=0, make sure all parallel nodes get the same seed [af0330d]
+
+## 2.17.0
+
+### Features
+
+- add `--github-output` for nicer output in github actions [e8a2056]
+
+### Maintenance
+
+- fix typo in core_dsl.go [977bc6f]
+- Fix typo in docs [e297e7b]
+
+## 2.16.0
+
+### Features
+- add SpecContext to reporting nodes
+
+### Fixes
+- merge coverages instead of combining them (#1329) (#1340) [23f0cc5]
+- core_dsl: disable Getwd() with environment variable (#1357) [cd418b7]
+
+### Maintenance
+- docs/index.md: Typo [2cebe8d]
+- fix docs [06de431]
+- chore: test with Go 1.22 (#1352) [898cba9]
+- Bump golang.org/x/tools from 0.16.1 to 0.17.0 (#1336) [17ae120]
+- Bump golang.org/x/sys from 0.15.0 to 0.16.0 (#1327) [5a179ed]
+- Bump github.com/go-logr/logr from 1.3.0 to 1.4.1 (#1321) [a1e6b69]
+- Bump github-pages and jekyll-feed in /docs (#1351) [d52951d]
+- Fix docs for handling failures in goroutines (#1339) [4471b2e]
+
+## 2.15.0
+
+### Features
+
+- JUnit reports now interpret Label(owner:X) and set owner to X. [8f3bd70]
+- include cancellation reason when cancelling spec context [96e915c]
+
+### Fixes
+
+- emit output of failed go tool cover invocation so users can try to debug things for themselves [c245d09]
+- fix outline when using nodot in ginkgo v2 [dca77c8]
+- Document areas where GinkgoT() behaves differently from testing.T [dbaf18f]
+- bugfix(docs): use Unsetenv instead of Clearenv (#1337) [6f67a14]
+
+### Maintenance
+
+- Bump to go 1.20 [4fcd0b3]
+
+## 2.14.0
+
+### Features
+You can now use `GinkgoTB()` when you need an instance of `testing.TB` to pass to a library.
+
+Prior to this release table testing only supported generating individual `It`s for each test entry. `DescribeTableSubtree` extends table testing support to entire testing subtrees - under the hood `DescribeTableSubtree` generates a new container for each entry and invokes your function to fill out the container. See the [docs](https://onsi.github.io/ginkgo/#generating-subtree-tables) to learn more; a brief sketch follows the feature list below.
+
+- Introduce DescribeTableSubtree [65ec56d]
+- add GinkgoTB() to docs [4a2c832]
+- Add GinkgoTB() function (#1333) [92b6744]
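+
+A minimal sketch of the subtree-table pattern (hypothetical suite code, not taken from the release):
+
+```go
+DescribeTableSubtree("building addresses",
+	func(scheme string, port int) {
+		var addr string
+		BeforeEach(func() {
+			addr = fmt.Sprintf("%s://localhost:%d", scheme, port)
+		})
+		It("uses the expected scheme", func() {
+			Expect(addr).To(HavePrefix(scheme))
+		})
+	},
+	Entry("http", "http", 80),
+	Entry("https", "https", 443),
+)
+```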
+
+### Fixes
+- Fix typo in internal/suite.go (#1332) [beb9507]
+- Fix typo in docs/index.md (#1319) [4ac3a13]
+- allow wasm to compile with ginkgo present (#1311) [b2e5bc5]
+
+### Maintenance
+- Bump golang.org/x/tools from 0.16.0 to 0.16.1 (#1316) [465a8ec]
+- Bump actions/setup-go from 4 to 5 (#1313) [eab0e40]
+- Bump github/codeql-action from 2 to 3 (#1317) [fbf9724]
+- Bump golang.org/x/crypto (#1318) [3ee80ee]
+- Bump golang.org/x/tools from 0.14.0 to 0.16.0 (#1306) [123e1d5]
+- Bump github.com/onsi/gomega from 1.29.0 to 1.30.0 (#1297) [558f6e0]
+- Bump golang.org/x/net from 0.17.0 to 0.19.0 (#1307) [84ff7f3]
+
+## 2.13.2
+
+### Fixes
+- Fix file handler leak (#1309) [e2e81c8]
+- Avoid allocations with `(*regexp.Regexp).MatchString` (#1302) [3b2a2a7]
+
+## 2.13.1
+
+### Fixes
+- #1296 fix(precompiled test suite): exec bit check omitted on Windows (#1301) [26eea01]
+
+### Maintenance
+- Bump github.com/go-logr/logr from 1.2.4 to 1.3.0 (#1291) [7161a9d]
+- Bump golang.org/x/sys from 0.13.0 to 0.14.0 (#1295) [7fc7b10]
+- Bump golang.org/x/tools from 0.12.0 to 0.14.0 (#1282) [74bbd65]
+- Bump github.com/onsi/gomega from 1.27.10 to 1.29.0 (#1290) [9373633]
+- Bump golang.org/x/net in /integration/_fixtures/version_mismatch_fixture (#1286) [6e3cf65]
+
+## 2.13.0
+
+### Features
+
+Add PreviewSpecs() to enable programmatic preview access to the suite report (fixes #1225)
+
+## 2.12.1
+
+### Fixes
+- Print logr prefix if it exists (#1275) [90d4846]
+
+### Maintenance
+- Bump actions/checkout from 3 to 4 (#1271) [555f543]
+- Bump golang.org/x/sys from 0.11.0 to 0.12.0 (#1270) [d867b7d]
+
+## 2.12.0
+
+### Features
+
+- feat: allow MustPassRepeatedly decorator to be set at suite level (#1266) [05de518]
+
+### Fixes
+
+- fix-errors-in-readme (#1244) [27c2f5d]
+
+### Maintenance
+
+Various chores/dependency bumps.
+
+## 2.11.0
+
+In prior versions of Ginkgo, the CLI filter flags (e.g. `--focus`, `--label-filter`) would _override_ any programmatic focus. This behavior has proved surprising and confusing in at least the following ways:
+
+- users cannot combine programmatic filters and CLI filters to more efficiently select subsets of tests
+- CLI filters can override programmatic focus on CI systems resulting in an exit code of 0 despite the presence of (incorrectly!) committed focused specs.
+
+Going forward Ginkgo will AND all programmatic and CLI filters. Moreover, the presence of any programmatic focused tests will always result in a non-zero exit code.
+
+This change is technically a change in Ginkgo's external contract and may require some users to make changes to successfully adopt. Specifically: it's possible some users were intentionally using CLI filters to override programmatic focus. If this is you please open an issue so we can explore solutions to the underlying problem you are trying to solve.
+
+### Fixes
+- Programmatic focus is no longer overwritten by CLI filters [d6bba86]
+
+### Maintenance
+- Bump github.com/onsi/gomega from 1.27.7 to 1.27.8 (#1218) [4a70a38]
+- Bump golang.org/x/sys from 0.8.0 to 0.9.0 (#1219) [97eda4d]
+
+## 2.10.0
+
+### Features
+- feat(ginkgo/generators): add --tags flag (#1216) [a782a77]
+ adds a new --tags flag to ginkgo generate
+
+### Fixes
+- Fix broken link of MIGRATING_TO_V2.md (#1217) [548d78e]
+
+### Maintenance
+- Bump golang.org/x/tools from 0.9.1 to 0.9.3 (#1215) [2b76a5e]
+
+## 2.9.7
+
+### Fixes
+- fix race when multiple defercleanups are called in goroutines [07fc3a0]
+
+## 2.9.6
+
+### Fixes
+- fix: create parent directory before report files (#1212) [0ac65de]
+
+### Maintenance
+- Bump github.com/onsi/gomega from 1.27.6 to 1.27.7 (#1202) [3e39231]
+
+## 2.9.5
+
+### Fixes
+- ensure the correct deterministic sort order is produced when ordered specs are generated by a helper function [7fa0b6b]
+
+### Maintenance
+- fix generators link (#1200) [9f9d8b9]
+- Bump golang.org/x/tools from 0.8.0 to 0.9.1 (#1196) [150e3f2]
+- fix spelling err in docs (#1199) [0013b1a]
+- Bump golang.org/x/sys from 0.7.0 to 0.8.0 (#1193) [9e9e3e5]
+
+## 2.9.4
+
+### Fixes
+- fix hang with ginkgo -p (#1192) [15d4bdc] - this addresses a _long_-standing issue related to Ginkgo hanging when a child process spawned by the test does not exit.
+
+- fix: fail fast may cause Serial spec or cleanup Node interrupted (#1178) [8dea88b] - prior to this there was a small gap in which specs on other processes might start even if one process has tried to abort the suite.
+
+
+### Maintenance
+- Document run order when multiple setup nodes are at the same nesting level [903be81]
+
+## 2.9.3
+
+### Features
+- Add RenderTimeline to GinkgoT() [c0c77b6]
+
+### Fixes
+- update Measure deprecation message. fixes #1176 [227c662]
+- add newlines to GinkgoLogr (#1170) (#1171) [0de0e7c]
+
+### Maintenance
+- Bump commonmarker from 0.23.8 to 0.23.9 in /docs (#1183) [8b925ab]
+- Bump nokogiri from 1.14.1 to 1.14.3 in /docs (#1184) [e3795a4]
+- Bump golang.org/x/tools from 0.7.0 to 0.8.0 (#1182) [b453793]
+- Bump actions/setup-go from 3 to 4 (#1164) [73ed75b]
+- Bump github.com/onsi/gomega from 1.27.4 to 1.27.6 (#1173) [0a2bc64]
+- Bump github.com/go-logr/logr from 1.2.3 to 1.2.4 (#1174) [f41c557]
+- Bump golang.org/x/sys from 0.6.0 to 0.7.0 (#1179) [8e423e5]
+
+## 2.9.2
+
+### Maintenance
+- Bump github.com/go-task/slim-sprig (#1167) [3fcc5bf]
+- Bump github.com/onsi/gomega from 1.27.3 to 1.27.4 (#1163) [6143ffe]
+
+## 2.9.1
+
+### Fixes
+This release fixes a longstanding issue where `ginkgo -coverpkg=./...` would not work. This is now resolved and fixes [#1161](https://github.com/onsi/ginkgo/issues/1161) and [#995](https://github.com/onsi/ginkgo/issues/995)
+- Support -coverpkg=./... [26ca1b5]
+- document coverpkg a bit more clearly [fc44c3b]
+
+### Maintenance
+- bump various dependencies
+- Improve Documentation and fix typo (#1158) [93de676]
+
+## 2.9.0
+
+### Features
+- AttachProgressReporter is an experimental feature that allows users to provide arbitrary information when a ProgressReport is requested [28801fe]
+
+- GinkgoT() has been expanded to include several Ginkgo-specific methods [2bd5a3b]
+
+ The intent is to enable the development of third-party libraries that integrate deeply with Ginkgo using `GinkgoT()` to access Ginkgo's functionality.
+
+## 2.8.4
+
+### Features
+- Add OmitSuiteSetupNodes to JunitReportConfig (#1147) [979fbc2]
+- Add a reference to ginkgolinter in docs.index.md (#1143) [8432589]
+
+### Fixes
+- rename tools hack to see if it fixes things for downstream users [a8bb39a]
+
+### Maintenance
+- Bump golang.org/x/text (#1144) [41b2a8a]
+- Bump github.com/onsi/gomega from 1.27.0 to 1.27.1 (#1142) [7c4f583]
+
+## 2.8.3
+
+Released to fix security issue in golang.org/x/net dependency
+
+### Maintenance
+
+- Bump golang.org/x/net from 0.6.0 to 0.7.0 (#1141) [fc1a02e]
+- remove tools.go hack from documentation [0718693]
+
+## 2.8.2
+
+Ginkgo now includes a `tools.go` file in the root directory of the `ginkgo` package. This should allow modules that simply `go get github.com/onsi/ginkgo/v2` to also pull in the CLI dependencies. This obviates the need for consumers of Ginkgo to have their own `tools.go` file and makes it simpler to ensure that the version of the `ginkgo` CLI being used matches the version of the library. You can simply run `go run github.com/onsi/ginkgo/v2/ginkgo` to run the version of the cli associated with your package go.mod.
+
+### Maintenance
+
+- Bump github.com/onsi/gomega from 1.26.0 to 1.27.0 (#1139) [5767b0a]
+- Fix minor typos (#1138) [e1e9723]
+- Fix link in V2 Migration Guide (#1137) [a588f60]
+
+## 2.8.1
+
+### Fixes
+- lock around default report output to avoid triggering the race detector when calling By from goroutines [2d5075a]
+- don't run ReportEntries through sprintf [febbe38]
+
+### Maintenance
+- Bump golang.org/x/tools from 0.5.0 to 0.6.0 (#1135) [11a4860]
+- test: update matrix for Go 1.20 (#1130) [4890a62]
+- Bump golang.org/x/sys from 0.4.0 to 0.5.0 (#1133) [a774638]
+- Bump github.com/onsi/gomega from 1.25.0 to 1.26.0 (#1120) [3f233bd]
+- Bump github-pages from 227 to 228 in /docs (#1131) [f9b8649]
+- Bump activesupport from 6.0.6 to 6.0.6.1 in /docs (#1127) [6f8c042]
+- Update index.md with instructions on how to upgrade Ginkgo [833a75e]
+
+## 2.8.0
+
+### Features
+
+- Introduce GinkgoHelper() to track and exclude helper functions from potential CodeLocations [e19f556]
+
+Modeled after `testing.T.Helper()`. Now, rather than write code like:
+
+```go
+func helper(model Model) {
+ Expect(model).WithOffset(1).To(BeValid())
+ Expect(model.SerialNumber).WithOffset(1).To(MatchRegexp("[a-f0-9]*"))
+}
+```
+
+you can stop tracking offsets (which makes nesting and composing helpers nearly impossible) and simply write:
+
+```go
+func helper(model Model) {
+ GinkgoHelper()
+ Expect(model).To(BeValid())
+ Expect(model.SerialNumber).To(MatchRegexp("[a-f0-9]*"))
+}
+```
+
+- Introduce GinkgoLabelFilter() and Label().MatchesLabelFilter() to make it possible to programmatically match filters (fixes #1119) [2f6597c]
+
+You can now write code like this:
+
+```go
+BeforeSuite(func() {
+ if Label("slow").MatchesLabelFilter(GinkgoLabelFilter()) {
+ // do slow setup
+ }
+
+ if Label("fast").MatchesLabelFilter(GinkgoLabelFilter()) {
+ // do fast setup
+ }
+})
+```
+
+to programmatically check whether a given set of labels will match the configured `--label-filter`.
+
+### Maintenance
+
+- Bump webrick from 1.7.0 to 1.8.1 in /docs (#1125) [ea4966e]
+- codeql: add ruby language (#1124) [9dd275b]
+- dependabot: add bundler package-ecosystem for docs (#1123) [14e7bdd]
+
+## 2.7.1
+
+### Fixes
+- Bring back SuiteConfig.EmitSpecProgress to avoid compilation issue for consumers that set it manually [d2a1cb0]
+
+### Maintenance
+- Bump github.com/onsi/gomega from 1.24.2 to 1.25.0 (#1118) [cafece6]
+- Bump golang.org/x/tools from 0.4.0 to 0.5.0 (#1111) [eda66c2]
+- Bump golang.org/x/sys from 0.3.0 to 0.4.0 (#1112) [ac5ccaa]
+- Bump github.com/onsi/gomega from 1.24.1 to 1.24.2 (#1097) [eee6480]
+
+## 2.7.0
+
+### Features
+- Introduce ContinueOnFailure for Ordered containers [e0123ca] - Ordered containers that are also decorated with ContinueOnFailure will not stop running specs after the first spec fails (see the sketch after this list).
+- Support for bootstrap commands to use custom data for templates (#1110) [7a2b242]
+- Support for labels and pending decorator in ginkgo outline output (#1113) [e6e3b98]
+- Color aliases for custom color support (#1101) [49fab7a]
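+
+A minimal sketch of the new decorator (hypothetical spec bodies):
+
+```go
+Describe("an ordered flow", Ordered, ContinueOnFailure, func() {
+	It("step one", func() { /* ... */ })
+	It("step two", func() { /* still runs even if step one fails */ })
+})
+```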
+
+### Fixes
+- correctly ensure deterministic spec order, even if specs are generated by iterating over a map [89dda20]
+- Fix a bug where timed-out specs were not correctly treated as failures when determining whether or not to run AfterAlls in an Ordered container.
+- Ensure go test coverprofile outputs to the expected location (#1105) [b0bd77b]
+
+## 2.6.1
+
+### Features
+- Override formatter colors from envvars - this is a new feature but an alternative approach involving config files might be taken in the future (#1095) [60240d1]
+
+### Fixes
+- GinkgoRecover now supports ignoring panics that match a specific, hidden, interface [301f3e2]
+
+### Maintenance
+- Bump github.com/onsi/gomega from 1.24.0 to 1.24.1 (#1077) [3643823]
+- Bump golang.org/x/tools from 0.2.0 to 0.4.0 (#1090) [f9f856e]
+- Bump nokogiri from 1.13.9 to 1.13.10 in /docs (#1091) [0d7087e]
+
+## 2.6.0
+
+### Features
+- `ReportBeforeSuite` provides access to the suite report before the suite begins (see the sketch after this list).
+- Add junit config option for omitting leafnodetype (#1088) [956e6d2]
+- Add support to customize junit report config to omit spec labels (#1087) [de44005]
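+
+A minimal sketch (assuming a dot-imported suite; `PreRunStats` comes from `types.Report`):
+
+```go
+var _ = ReportBeforeSuite(func(report Report) {
+	fmt.Fprintf(GinkgoWriter, "will run %d of %d specs\n",
+		report.PreRunStats.SpecsThatWillRun, report.PreRunStats.TotalSpecs)
+})
+```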
+
+### Fixes
+- Fix stack trace pruning so that it has a chance of working on windows [2165648]
+
+## 2.5.1
+
+### Fixes
+- skipped tests only show as 'S' when running with -v [3ab38ae]
+- Fix typo in docs/index.md (#1082) [55fc58d]
+- Fix typo in docs/index.md (#1081) [8a14f1f]
+- Fix link notation in docs/index.md (#1080) [2669612]
+- Fix typo in `--progress` deprecation message (#1076) [b4b7edc]
+
+### Maintenance
+- chore: Included githubactions in the dependabot config (#976) [baea341]
+- Bump golang.org/x/sys from 0.1.0 to 0.2.0 (#1075) [9646297]
+
+## 2.5.0
+
+### Ginkgo output now includes a timeline-view of the spec
+
+This commit changes Ginkgo's default output. Spec details are now
+presented as a **timeline** that includes events that occur during the spec
+lifecycle interleaved with any GinkgoWriter content. This makes it much easier
+to understand the flow of a spec and where a given failure occurs.
+
+The --progress, --slow-spec-threshold, --always-emit-ginkgo-writer flags
+and the SuppressProgressReporting decorator have all been deprecated. Instead
+the existing -v and -vv flags better capture the level of verbosity to display. However,
+a new --show-node-events flag is added to include node `> Enter` and `< Exit` events
+in the spec timeline.
+
+In addition, JUnit reports now include the timeline (rendered with -vv) and custom JUnit
+reports can be configured and generated using
+`GenerateJUnitReportWithConfig(report types.Report, dst string, config JunitReportConfig)`
+
+Code should continue to work unchanged with this version of Ginkgo - however if you have tooling that
+was relying on the specific output format of Ginkgo you _may_ run into issues. Ginkgo's console output is not guaranteed to be stable for tooling and automation purposes. You should, instead, use Ginkgo's JSON format
+to build tooling on top of as it has stronger guarantees to be stable from version to version.
+
+### Features
+- Provide details about which timeout expired [0f2fa27]
+
+### Fixes
+- Add Support Policy to docs [c70867a]
+
+### Maintenance
+- Bump github.com/onsi/gomega from 1.22.1 to 1.23.0 (#1070) [bb3b4e2]
+
+## 2.4.0
+
+### Features
+
+- DeferCleanup supports functions with multiple return values [5e33c75] (see the sketch after this list)
+- Add GinkgoLogr (#1067) [bf78c28]
+- Introduction of 'MustPassRepeatedly' decorator (#1051) [047c02f]
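+
+A minimal sketch (hypothetical `StartServer` helper; a trailing non-nil error returned by the cleanup function is treated as a failure):
+
+```go
+BeforeEach(func() {
+	server, err := StartServer() // hypothetical: returns (*Server, error)
+	Expect(err).NotTo(HaveOccurred())
+	// server.Stop may return multiple values, e.g. (bool, error);
+	// DeferCleanup accepts it directly.
+	DeferCleanup(server.Stop)
+})
+```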
+
+### Fixes
+- correcting some typos (#1064) [1403d3c]
+- fix flaky internal_integration interrupt specs [2105ba3]
+- Correct busted link in README [be6b5b9]
+
+### Maintenance
+- Bump actions/checkout from 2 to 3 (#1062) [8a2f483]
+- Bump golang.org/x/tools from 0.1.12 to 0.2.0 (#1065) [529c4e8]
+- Bump github/codeql-action from 1 to 2 (#1061) [da09146]
+- Bump actions/setup-go from 2 to 3 (#1060) [918040d]
+- Bump github.com/onsi/gomega from 1.22.0 to 1.22.1 (#1053) [2098e4d]
+- Bump nokogiri from 1.13.8 to 1.13.9 in /docs (#1066) [1d74122]
+- Add GHA to dependabot config [4442772]
+
+## 2.3.1
+
+### Fixes
+Several users were invoking `ginkgo` by installing the latest version of the cli via `go install github.com/onsi/ginkgo/v2/ginkgo@latest`. When 2.3.0 was released this resulted in an influx of issues as CI systems failed due to a change in the internal contract between the Ginkgo CLI and the Ginkgo library. Ginkgo only supports running the same version of the library as the cli (which is why both are packaged in the same repository).
+
+With this patch release, the ginkgo CLI can now identify a version mismatch and emit a helpful error message.
+
+- Ginkgo cli can identify version mismatches and emit a helpful error message [bc4ae2f]
+- further emphasize that a version match is required when running Ginkgo on CI and/or locally [2691dd8]
+
+### Maintenance
+- bump gomega to v1.22.0 [822a937]
+
+## 2.3.0
+
+### Interruptible Nodes and Timeouts
+
+Ginkgo now supports per-node and per-spec timeouts on interruptible nodes. Check out the [documentation for all the details](https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes) but the gist is you can now write specs like this:
+
+```go
+It("is interruptible", func(ctx SpecContext) { // or context.Context instead of SpecContext, both are valid.
+ // do things until `ctx.Done()` is closed, for example:
+ req, err := http.NewRequestWithContext(ctx, "POST", "/build-widgets", nil)
+ Expect(err).NotTo(HaveOccurred())
+ _, err = http.DefaultClient.Do(req)
+ Expect(err).NotTo(HaveOccurred())
+
+ Eventually(client.WidgetCount).WithContext(ctx).Should(Equal(17))
+}, NodeTimeout(time.Second*20), GracePeriod(5*time.Second))
+```
+
+and have Ginkgo ensure that the node completes before the timeout elapses. If it does elapse, or if an external interrupt is received (e.g. `^C`) then Ginkgo will cancel the context and wait for the Grace Period for the node to exit before proceeding with any cleanup nodes associated with the spec. The `ctx` provided by Ginkgo can also be passed down to Gomega's `Eventually` to have all assertions within the node governed by a single deadline.
+
+### Features
+
+- Ginkgo now records any additional failures that occur during the cleanup of a failed spec. In prior versions this information was quietly discarded, but the introduction of a more rigorous approach to timeouts and interruptions allows Ginkgo to better track subsequent failures.
+- `SpecContext` also provides a mechanism for third-party libraries to provide additional information when a Progress Report is generated. Gomega uses this to provide the current state of an `Eventually().WithContext()` assertion when a Progress Report is requested.
+- DescribeTable now exits with an error if it is not passed any Entries [a4c9865]
+
+### Fixes
+- fixes crashes on newer Ruby 3 installations by upgrading github-pages gem dependency [92c88d5]
+- Make the outline command able to use the DSL import [1be2427]
+
+### Maintenance
+- chore(docs): delete no meaning d [57c373c]
+- chore(docs): Fix hyperlinks [30526d5]
+- chore(docs): fix code blocks without language settings [cf611c4]
+- fix intra-doc link [b541bcb]
+
+## 2.2.0
+
+### Generate real-time Progress Reports [f91377c]
+
+Ginkgo can now generate Progress Reports to point users at the current running line of code (including a preview of the actual source code) and a best guess at the most relevant subroutines.
+
+These Progress Reports allow users to debug stuck or slow tests without exiting the Ginkgo process. A Progress Report can be generated at any time by sending Ginkgo a `SIGINFO` (`^T` on MacOS/BSD) or `SIGUSR1`.
+
+In addition, the user can specify `--poll-progress-after` and `--poll-progress-interval` to have Ginkgo start periodically emitting progress reports if a given node takes too long. These can be overridden/set on a per-node basis with the `PollProgressAfter` and `PollProgressInterval` decorators.
+
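+A minimal sketch of the per-node decorators (hypothetical spec body):
+
+```go
+It("downloads a large file", func(ctx SpecContext) {
+	// long-running work governed by ctx...
+}, PollProgressAfter(30*time.Second), PollProgressInterval(10*time.Second))
+```
+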
+Progress Reports are emitted to stdout, and also stored in the machine-readable report formats that Ginkgo supports.
+
+Ginkgo also uses this progress reporting infrastructure under the hood when handling timeouts and interrupts. This yields much more focused, useful, and informative stack traces than previously.
+
+### Features
+- `BeforeSuite`, `AfterSuite`, `SynchronizedBeforeSuite`, `SynchronizedAfterSuite`, and `ReportAfterSuite` now support (the relevant subset of) decorators. These can be passed in _after_ the callback functions that are usually passed into these nodes.
+
+ As a result the **signature of these methods has changed** and now includes a trailing `args ...interface{}`. For most users simply using the DSL, this change is transparent. However if you were assigning one of these functions to a custom variable (or passing it around) then your code may need to change to reflect the new signature.
+
+### Maintenance
+- Modernize the invocation of Ginkgo in github actions [0ffde58]
+- Update recommended CI settings in docs [896bbb9]
+- Speed up unnecessarily slow integration test [6d3a90e]
+
+## 2.1.6
+
+### Fixes
+- Add `SuppressProgressReporting` decorator to turn off --progress announcements for a given node [dfef62a]
+- chore: remove duplicate word in comments [7373214]
+
+## 2.1.5
+
+### Fixes
+- drop -mod=mod instructions; fixes #1026 [6ad7138]
+- Ensure `CurrentSpecReport` and `AddReportEntry` are thread-safe [817c09b]
+- remove stale importmap gcflags flag test [3cd8b93]
+- Always emit spec summary [5cf23e2] - even when only one spec has failed
+- Fix ReportAfterSuite usage in docs [b1864ad]
+- fixed typo (#997) [219cc00]
+- TrimRight is not designed to trim Suffix [71ebb74]
+- refactor: replace strings.Replace with strings.ReplaceAll (#978) [143d208]
+- fix syntax in examples (#975) [b69554f]
+
+### Maintenance
+- Bump github.com/onsi/gomega from 1.20.0 to 1.20.1 (#1027) [e5dfce4]
+- Bump tzinfo from 1.2.9 to 1.2.10 in /docs (#1006) [7ae91c4]
+- Bump github.com/onsi/gomega from 1.19.0 to 1.20.0 (#1005) [e87a85a]
+- test: add new Go 1.19 to test matrix (#1014) [bbefe12]
+- Bump golang.org/x/tools from 0.1.11 to 0.1.12 (#1012) [9327906]
+- Bump golang.org/x/tools from 0.1.10 to 0.1.11 (#993) [f44af96]
+- Bump nokogiri from 1.13.3 to 1.13.6 in /docs (#981) [ef336aa]
+
+## 2.1.4
+
+### Fixes
+- Numerous documentation typos
+- Prepend `when` when using `When` (this behavior was in 1.x but unintentionally lost during the 2.0 rewrite) [efce903]
+- improve error message when a parallel process fails to report back [a7bd1fe]
+- guard against concurrent map writes in DeprecationTracker [0976569]
+- Invoke reporting nodes during dry-run (fixes #956 and #935) [aae4480]
+- Fix ginkgo import circle [f779385]
+
+## 2.1.3
+
+See [https://onsi.github.io/ginkgo/MIGRATING_TO_V2](https://onsi.github.io/ginkgo/MIGRATING_TO_V2) for details on V2.
+
+### Fixes
+- Calling By in a container node now emits a useful error. [ff12cee]
+
+## 2.1.2
+
+### Fixes
+
+- Track location of focused specs correctly in `ginkgo unfocus` [a612ff1]
+- Profiling suites with focused specs no longer generates an erroneous failure message [8fbfa02]
+- Several documentation typos fixed. Big thanks to everyone who helped catch them and report/fix them!
+
+## 2.1.1
+
+See [https://onsi.github.io/ginkgo/MIGRATING_TO_V2](https://onsi.github.io/ginkgo/MIGRATING_TO_V2) for details on V2.
+
+### Fixes
+- Suites that only import the new dsl packages are now correctly identified as Ginkgo suites [ec17e17]
+
+## 2.1.0
+
+See [https://onsi.github.io/ginkgo/MIGRATING_TO_V2](https://onsi.github.io/ginkgo/MIGRATING_TO_V2) for details on V2.
+
+2.1.0 is a minor release with a few tweaks:
+
+- Introduce new DSL packages to enable users to pick-and-choose which portions of the DSL to dot-import. [90868e2] More details [here](https://onsi.github.io/ginkgo/#alternatives-to-dot-importing-ginkgo).
+- Add error check for invalid/nil parameters to DescribeTable [6f8577e]
+- Myriad docs typos fixed (thanks everyone!) [718542a, ecb7098, 146654c, a8f9913, 6bdffde, 03dcd7e]
+
+## 2.0.0
+
+See [https://onsi.github.io/ginkgo/MIGRATING_TO_V2](https://onsi.github.io/ginkgo/MIGRATING_TO_V2)
+
+## 1.16.5
+
+Ginkgo 2.0 now has a Release Candidate. 1.16.5 advertises the existence of the RC.
+1.16.5 deprecates GinkgoParallelNode in favor of GinkgoParallelProcess
+
+You can silence the RC advertisement by setting an `ACK_GINKGO_RC=true` environment variable or creating a file in your home directory called `.ack-ginkgo-rc`
+
+## 1.16.4
+
+### Fixes
+1.16.4 retracts 1.16.3. There are no code changes. The 1.16.3 tag was associated with the wrong commit and an attempt to change it after-the-fact has proven problematic. 1.16.4 retracts 1.16.3 in Ginkgo's go.mod and creates a new, correctly tagged, release.
+
+## 1.16.3
+
+### Features
+- Measure is now deprecated and emits a deprecation warning.
+
+## 1.16.2
+
+### Fixes
+- Deprecations can be suppressed by setting an `ACK_GINKGO_DEPRECATIONS=` environment variable.
+
+## 1.16.1
+
+### Fixes
+- Suppress --stream deprecation warning on windows (#793)
+
+## 1.16.0
+
+### Features
+- Advertise Ginkgo 2.0. Introduce deprecations. [9ef1913]
+ - Update README.md to advertise that Ginkgo 2.0 is coming.
+ - Backport the 2.0 DeprecationTracker and start alerting users
+ about upcoming deprecations.
+
+- Add slim-sprig template functions to bootstrap/generate (#775) [9162b86]
+
+- Fix accidental reference to 1488 (#784) [9fb7fe4]
+
+## 1.15.2
+
+### Fixes
+- ignore blank `-focus` and `-skip` flags (#780) [e90a4a0]
+
+## 1.15.1
+
+### Fixes
+- reporters/junit: Use `system-out` element instead of `passed` (#769) [9eda305]
+
+## 1.15.0
+
+### Features
+- Adds 'outline' command to print the outline of specs/containers in a file (#754) [071c369] [6803cc3] [935b538] [06744e8] [0c40583]
+- Add support for using template to generate tests (#752) [efb9e69]
+- Add a Chinese Doc #755 (#756) [5207632]
+- cli: allow multiple -focus and -skip flags (#736) [9a782fb]
+
+### Fixes
+- Add _internal to filename of tests created with internal flag (#751) [43c12da]
+
+## 1.14.2
+
+### Fixes
+- correct handling windows backslash in import path (#721) [97f3d51]
+- Add additional methods to GinkgoT() to improve compatibility with the testing.TB interface [b5fe44d]
+
+## 1.14.1
+
+### Fixes
+- Discard exported method declaration when running ginkgo bootstrap (#558) [f4b0240]
+
+## 1.14.0
+
+### Features
+- Defer running top-level container nodes until RunSpecs is called [d44dedf]
+- [Document Ginkgo lifecycle](http://onsi.github.io/ginkgo/#understanding-ginkgos-lifecycle)
+- Add `extensions/globals` package (#692) [3295c8f] - this can be helpful in contexts where you are test-driving your test-generation code (see [#692](https://github.com/onsi/ginkgo/pull/692))
+- Print Skip reason in JUnit reporter if one was provided [820dfab]
+
+## 1.13.0
+
+### Features
+- Add a version of table.Entry that allows dumping the entry parameters. (#689) [21eaef2]
+
+### Fixes
+- Ensure integration tests pass in an environment sans GOPATH [606fba2]
+- Add books package (#568) [fc0e44e]
+- doc(readme): installation via "tools package" (#677) [83bb20e]
+- Solve the undefined: unix.Dup2 compile error on mips64le (#680) [0624f75]
+- Import package without dot (#687) [6321024]
+- Fix integration tests to stop requiring GOPATH (#686) [a912ec5]
+
+## 1.12.3
+
+### Fixes
+- Print correct code location of failing table test (#666) [c6d7afb]
+
+## 1.12.2
+
+### Fixes
+- Update dependencies [ea4a036]
+
+## 1.12.1
+
+### Fixes
+- Make unfocus ("blur") much faster (#674) [8b18061]
+- Fix typo (#673) [7fdcbe8]
+- Test against 1.14 and remove 1.12 [d5c2ad6]
+- Test if a coverprofile content is empty before checking its latest character (#670) [14d9fa2]
+- replace tail package with maintained one. this fixes go get errors (#667) [4ba33d4]
+- improve ginkgo performance - makes progress on #644 [a14f98e]
+- fix convert integration tests [1f8ba69]
+- fix typo "succesful" -> "successful" (#663) [1ea49cf]
+- Fix invalid link (#658) [b886136]
+- convert utility : Include comments from source (#657) [1077c6d]
+- Explain what BDD means [d79e7fb]
+- skip race detector test on unsupported platform (#642) [f8ab89d]
+- Use Dup2 from golang.org/x/sys/unix instead of syscallDup (#638) [5d53c55]
+- Fix missing newline in combined coverage file (#641) [6a07ea2]
+- check if a spec is run before returning SpecSummary (#645) [8850000]
+
+## 1.12.0
+
+### Features
+- Add module definition (#630) [78916ab]
+
+## 1.11.0
+
+### Features
+- Add syscall for riscv64 architecture [f66e896]
+- teamcity reporter: output location of test failure as well as test definition (#626) [9869142]
+- teamcity reporter: output newline after every service message (#625) [3cfa02d]
+- Add support for go module when running `generate` command (#578) [9c89e3f]
+
+## 1.10.3
+
+### Fixes
+- Set go_import_path in travis.yml to allow internal packages in forks (#607) [3b721db]
+- Add integration test [d90e0dc]
+- Fix coverage files combining [e5dde8c]
+- A new CLI option: -ginkgo.reportFile (#601) [034fd25]
+
+## 1.10.2
+
+### Fixes
+- speed up table entry generateIt() (#609) [5049dc5]
+- Fix. Write errors to stderr instead of stdout (#610) [7bb3091]
+
+## 1.10.1
+
+### Fixes
+- stack backtrace: fix skipping (#600) [2a4c0bd]
+
+## 1.10.0
+
+### Fixes
+- stack backtrace: fix alignment and skipping [66915d6]
+- fix typo in documentation [8f97b93]
+
+## 1.9.0
+
+### Features
+- Option to print output into report, when tests have passed [0545415]
+
+### Fixes
+- Fixed typos in comments [0ecbc58]
+- gofmt code [a7f8bfb]
+- Simplify code [7454d00]
+- Simplify concatenation, incrementation and function assignment [4825557]
+- Avoid unnecessary conversions [9d9403c]
+- JUnit: include more detailed information about panic [19cca4b]
+- Print help to stdout when the user asks for help [4cb7441]
+
+
+## 1.8.0
+
+### New Features
+- allow config of the vet flag for `go test` (#562) [3cd45fa]
+- Support projects using go modules [d56ee76]
+
+### Fixes and Minor Improvements
+- chore(godoc): fixes typos in Measurement funcs [dbaca8e]
+- Optimize focus to avoid allocations [f493786]
+- Ensure generated test file names are underscored [505cc35]
+
+## 1.7.0
+
+### New Features
+- Add JustAfterEach (#484) [0d4f080]
+
+### Fixes
+- Correctly round suite time in junit reporter [2445fc1]
+- Avoid using -i argument to go test for Golang 1.10+ [46bbc26]
+
+## 1.6.0
+
+### New Features
+- add --debug flag to emit node output to files (#499) [39febac]
+
+### Fixes
+- fix: for `go vet` to pass [69338ec]
+- docs: fix for contributing instructions [7004cb1]
+- consolidate and streamline contribution docs (#494) [d848015]
+- Make generated Junit file compatible with "Maven Surefire" (#488) [e51bee6]
+- all: gofmt [000d317]
+- Increase eventually timeout to 30s [c73579c]
+- Clarify asynchronous test behavior [294d8f4]
+- Travis badge should only show master [26d2143]
+
+## 1.5.0 5/10/2018
+
+### New Features
+- Supports go v1.10 (#443, #446, #451) [e873237, 468e89e, e37dbfe, a37f4c0, c0b857d, bca5260, 4177ca8]
+- Add a When() synonym for Context() (#386) [747514b, 7484dad, 7354a07, dd826c8]
+- Re-add noisySkippings flag [652e15c]
+- Allow coverage to be displayed for focused specs (#367) [11459a8]
+- Handle -outputdir flag (#364) [228e3a8]
+- Handle -coverprofile flag (#355) [43392d5]
+
+### Fixes
+- When using custom reporters register the custom reporters *before* the default reporter. This allows users to see the output of any print statements in their custom reporters. (#365) [8382b23]
+- When running a test and calculating the coverage using the `-coverprofile` and `-outputdir` flags, Ginkgo fails with an error if the directory does not exist. This is due to an [issue in go 1.10](https://github.com/golang/go/issues/24588) (#446) [b36a6e0]
+- `unfocus` command ignores vendor folder (#459) [e5e551c, c556e43, a3b6351, 9a820dd]
+- Ignore packages whose tests are all ignored by go (#456) [7430ca7, 6d8be98]
+- Increase the threshold when checking time measurements (#455) [2f714bf, 68f622c]
+- Fix race condition in coverage tests (#423) [a5a8ff7, ab9c08b]
+- Add an extra new line after reporting spec run completion for test2json [874520d]
+- added name field to the junit reporter's testsuite [ae61c63]
+- Do not set the run time of a spec when the dryRun flag is used (#438) [457e2d9, ba8e856]
+- Process FWhen and FSpecify when unfocusing (#434) [9008c7b, ee65bd, df87dfe]
+- Synchronize access to the state of specs to avoid race conditions (#430) [7d481bc, ae6829d]
+- Added Duration on GinkgoTestDescription (#383) [5f49dad, 528417e, 0747408, 329d7ed]
+- Fix Ginkgo stack trace on failure for Specify (#415) [b977ede, 65ca40e, 6c46eb8]
+- Update README with Go 1.6+, Golang -> Go (#409) [17f6b97, bc14b66, 20d1598]
+- Use fmt.Errorf instead of errors.New(fmt.Sprintf(...)) (#401) [a299f56, 44e2eaa]
+- Imports in generated code should follow conventions (#398) [0bec0b0, e8536d8]
+- Prevent data race when recording a benchmark value from multiple goroutines (#390) [c0c4881, 7a241e9]
+- Replace GOPATH in Environment [4b883f0]
+
+
+## 1.4.0 7/16/2017
+
+- `ginkgo` now provides a hint if you accidentally forget to run `ginkgo bootstrap` to generate a `*_suite_test.go` file that actually invokes the Ginkgo test runner. [#345](https://github.com/onsi/ginkgo/pull/345)
+- thanks to improvements in `go test -c`, `ginkgo` no longer needs to fix Go's compilation output to ensure compilation errors are expressed relative to the CWD. [#357]
+- `ginkgo watch -watchRegExp=...` allows you to specify a custom regular expression to watch. Only files matching the regular expression are watched for changes (the default is `\.go$`) [#356]
+- `ginkgo` now always emits compilation output. Previously, only failed compilation output was printed out. [#277]
+- `ginkgo -requireSuite` now fails the test run if there are `*_test.go` files but `go test` fails to detect any tests. Typically this means you forgot to run `ginkgo bootstrap` to generate a suite file. [#344]
+- `ginkgo -timeout=DURATION` allows you to adjust the timeout for the entire test suite (default is 24 hours) [#248]
+
+## 1.3.0 3/28/2017
+
+Improvements:
+
+- Significantly improved parallel test distribution. Now instead of pre-sharding test cases across workers (which can result in idle workers and poor test performance) Ginkgo uses a shared queue to keep all workers busy until all tests are complete. This improves test-time performance and consistency.
+- `Skip(message)` can be used to skip the current test.
+- Added `extensions/table` - a Ginkgo DSL for [Table Driven Tests](http://onsi.github.io/ginkgo/#table-driven-tests)
+- Add `GinkgoRandomSeed()` - shorthand for `config.GinkgoConfig.RandomSeed`
+- Support for retrying flaky tests with `--flakeAttempts`
+- `ginkgo ./...` now recurses as you'd expect
+- Added `Specify` a synonym for `It`
+- Support colorise on Windows
+- Broader support for various go compilation flags in the `ginkgo` CLI
+
+Bug Fixes:
+
+- Ginkgo tests now fail when you `panic(nil)` (#167)
+
+## 1.2.0 5/31/2015
+
+Improvements
+
+- `ginkgo -coverpkg` calls down to `go test -coverpkg` (#160)
+- `ginkgo -afterSuiteHook COMMAND` invokes the passed-in `COMMAND` after a test suite completes (#152)
+- Relaxed requirement for Go 1.4+. `ginkgo` now works with Go v1.3+ (#166)
+
+## 1.2.0-beta
+
+Ginkgo now requires Go 1.4+
+
+Improvements:
+
+- Call reporters in reverse order when announcing spec completion -- allows custom reporters to emit output before the default reporter does.
+- Improved focus behavior. Now, this:
+
+ ```golang
+ FDescribe("Some describe", func() {
+ It("A", func() {})
+
+ FIt("B", func() {})
+ })
+ ```
+
+ will run `B` but *not* `A`. This tends to be a common usage pattern when in the thick of writing and debugging tests.
+- When `SIGINT` is received, Ginkgo will emit the contents of the `GinkgoWriter` before running the `AfterSuite`. Useful for debugging stuck tests.
+- When `--progress` is set, Ginkgo will write test progress (in particular, Ginkgo will say when it is about to run a BeforeEach, AfterEach, It, etc...) to the `GinkgoWriter`. This is useful for debugging stuck tests and tests that generate many logs.
+- Improved output when an error occurs in a setup or teardown block.
+- When `--dryRun` is set, Ginkgo will walk the spec tree and emit to its reporter *without* actually running anything. Best paired with `-v` to understand which specs will run in which order.
+- Add `By` to help document long `It`s. `By` simply writes to the `GinkgoWriter`.
+- Add support for precompiled tests:
+ - `ginkgo build` will now compile the package, producing a file named `package.test`
+ - The compiled `package.test` file can be run directly. This runs the tests in series.
+ - To run precompiled tests in parallel, you can run: `ginkgo -p package.test`
+- Support `bootstrap`ping and `generate`ing [Agouti](http://agouti.org) specs.
+- `ginkgo generate` and `ginkgo bootstrap` now honor the package name already defined in a given directory
+- The `ginkgo` CLI ignores `SIGQUIT`. Prevents its stack dump from interlacing with the underlying test suite's stack dump.
+- The `ginkgo` CLI now compiles tests into a temporary directory instead of the package directory. This necessitates upgrading to Go v1.4+.
+- `ginkgo -notify` now works on Linux
+
+Bug Fixes:
+
+- If --skipPackages is used and all packages are skipped, Ginkgo should exit 0.
+- Fix tempfile leak when running in parallel
+- Fix incorrect failure message when a panic occurs during a parallel test run
+- Fixed an issue where a pending test within a focused context (or a focused test within a pending context) would skip all other tests.
+- Be more consistent about handling SIGTERM as well as SIGINT
+- When interrupted while concurrently compiling test suites in the background, Ginkgo now cleans up the compiled artifacts.
+- Fixed a long standing bug where `ginkgo -p` would hang if a process spawned by one of the Ginkgo parallel nodes does not exit. (Hooray!)
+
+## 1.1.0 (8/2/2014)
+
+No changes, just dropping the beta.
+
+## 1.1.0-beta (7/22/2014)
+New Features:
+
+- `ginkgo watch` now monitors packages *and their dependencies* for changes. The depth of the dependency tree can be modified with the `-depth` flag.
+- Test suites with a programmatic focus (`FIt`, `FDescribe`, etc...) exit with non-zero status code, even when they pass. This allows CI systems to detect accidental commits of focused test suites.
+- `ginkgo -p` runs the testsuite in parallel with an auto-detected number of nodes.
+- `ginkgo -tags=TAG_LIST` passes a list of tags down to the `go build` command.
+- `ginkgo --failFast` aborts the test suite after the first failure.
+- `ginkgo generate file_1 file_2` can take multiple file arguments.
+- Ginkgo now summarizes any spec failures that occurred at the end of the test run.
+- `ginkgo --randomizeSuites` will run tests *suites* in random order using the generated/passed-in seed.
+
+Improvements:
+
+- `ginkgo -skipPackage` now takes a comma-separated list of strings. If the *relative path* to a package matches one of the entries in the comma-separated list, that package is skipped.
+- `ginkgo --untilItFails` no longer recompiles between attempts.
+- Ginkgo now panics when a runnable node (`It`, `BeforeEach`, `JustBeforeEach`, `AfterEach`, `Measure`) is nested within another runnable node. This is always a mistake. Any test suites that panic because of this change should be fixed.
+
+Bug Fixes:
+
+- `ginkgo bootstrap` and `ginkgo generate` no longer fail when dealing with `hyphen-separated-packages`.
+- parallel specs are now better distributed across nodes - fixed a crashing bug where (for example) distributing 11 tests across 7 nodes would panic
+
+## 1.0.0 (5/24/2014)
+New Features:
+
+- Add `GinkgoParallelNode()` - shorthand for `config.GinkgoConfig.ParallelNode`
+
+Improvements:
+
+- When compilation fails, the compilation output is rewritten to present a correct *relative* path. Allows ⌘-clicking in iTerm to open the file in your text editor.
+- `--untilItFails` and `ginkgo watch` now generate new random seeds between test runs, unless a particular random seed is specified.
+
+Bug Fixes:
+
+- `-cover` now generates a correctly combined coverprofile when running in parallel with multiple `-node`s.
+- Print out the contents of the `GinkgoWriter` when `BeforeSuite` or `AfterSuite` fail.
+- Fix all remaining race conditions in Ginkgo's test suite.
+
+## 1.0.0-beta (4/14/2014)
+Breaking changes:
+
+- `thirdparty/gomocktestreporter` is gone. Use `GinkgoT()` instead
+- Modified the Reporter interface
+- `watch` is now a subcommand, not a flag.
+
+DSL changes:
+
+- `BeforeSuite` and `AfterSuite` for setting up and tearing down test suites.
+- `AfterSuite` is triggered on interrupt (`^C`) as well as exit.
+- `SynchronizedBeforeSuite` and `SynchronizedAfterSuite` for setting up and tearing down singleton resources across parallel nodes.
+
+CLI changes:
+
+- `watch` is now a subcommand, not a flag
+- `--nodot` flag can be passed to `ginkgo generate` and `ginkgo bootstrap` to avoid dot imports. This explicitly imports all exported identifiers in Ginkgo and Gomega. Refreshing this list can be done by running `ginkgo nodot`
+- Additional arguments can be passed to specs. Pass them after the `--` separator
+- `--skipPackage` flag takes a regexp and ignores any packages with package names passing said regexp.
+- `--trace` flag prints out full stack traces when errors occur, not just the line at which the error occurs.
+
+Misc:
+
+- Start using semantic versioning
+- Start maintaining changelog
+
+Major refactor:
+
+- Pull out Ginkgo's internal to `internal`
+- Rename `example` everywhere to `spec`
+- Much more!
diff --git a/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md b/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md
new file mode 100644
index 000000000..80de566a5
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md
@@ -0,0 +1,15 @@
+# Contributing to Ginkgo
+
+Your contributions to Ginkgo are essential for its long-term maintenance and improvement.
+
+- Please **open an issue first** - describe what problem you are trying to solve and give the community a forum for input and feedback ahead of investing time in writing code!
+- Ensure adequate test coverage:
+ - When adding to the Ginkgo library, add unit and/or integration tests (under the `integration` folder).
+ - When adding to the Ginkgo CLI, note that there are very few unit tests. Please add an integration test.
+- Run `make` or:
+ - Install ginkgo locally via `go install ./...`
+ - Make sure all the tests succeed via `ginkgo -r -p`
+ - Vet your changes via `go vet ./...`
+- Update the documentation. Ginkgo uses `godoc` comments and documentation in `docs/index.md`. You can run `bundle && bundle exec jekyll serve` in the `docs` directory to preview your changes.
+
+Thanks for supporting Ginkgo!
diff --git a/vendor/github.com/onsi/ginkgo/v2/LICENSE b/vendor/github.com/onsi/ginkgo/v2/LICENSE
new file mode 100644
index 000000000..9415ee72c
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/LICENSE
@@ -0,0 +1,20 @@
+Copyright (c) 2013-2014 Onsi Fakhouri
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/onsi/ginkgo/v2/Makefile b/vendor/github.com/onsi/ginkgo/v2/Makefile
new file mode 100644
index 000000000..06dff97cd
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/Makefile
@@ -0,0 +1,16 @@
+# default task since it's first
+.PHONY: all
+all: vet test
+
+.PHONY: test
+test:
+ go run github.com/onsi/ginkgo/v2/ginkgo -r -p -randomize-all -keep-going
+
+.PHONY: vet
+vet:
+ go vet ./...
+
+.PHONY: update-deps
+update-deps:
+ go get -u ./...
+ go mod tidy
\ No newline at end of file
diff --git a/vendor/github.com/onsi/ginkgo/v2/OWNERS b/vendor/github.com/onsi/ginkgo/v2/OWNERS
new file mode 100644
index 000000000..2d1f6c71e
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/OWNERS
@@ -0,0 +1,4 @@
+reviewers:
+approvers:
+ - bertinatto
+ - stbenjam
diff --git a/vendor/github.com/onsi/ginkgo/v2/README.md b/vendor/github.com/onsi/ginkgo/v2/README.md
new file mode 100644
index 000000000..cb23ffdf6
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/README.md
@@ -0,0 +1,115 @@
+
+
+[test workflow status](https://github.com/onsi/ginkgo/actions?query=workflow%3Atest+branch%3Amaster) | [Ginkgo Docs](https://onsi.github.io/ginkgo/)
+
+---
+
+# Ginkgo
+
+Ginkgo is a mature testing framework for Go designed to help you write expressive specs. Ginkgo builds on top of Go's `testing` foundation and is complemented by the [Gomega](https://github.com/onsi/gomega) matcher library. Together, Ginkgo and Gomega let you express the intent behind your specs clearly:
+
+```go
+import (
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+ ...
+)
+
+var _ = Describe("Checking books out of the library", Label("library"), func() {
+ var library *libraries.Library
+ var book *books.Book
+ var valjean *users.User
+ BeforeEach(func() {
+ library = libraries.NewClient()
+ book = &books.Book{
+ Title: "Les Miserables",
+ Author: "Victor Hugo",
+ }
+ valjean = users.NewUser("Jean Valjean")
+ })
+
+ When("the library has the book in question", func() {
+ BeforeEach(func(ctx SpecContext) {
+ Expect(library.Store(ctx, book)).To(Succeed())
+ })
+
+ Context("and the book is available", func() {
+ It("lends it to the reader", func(ctx SpecContext) {
+ Expect(valjean.Checkout(ctx, library, "Les Miserables")).To(Succeed())
+ Expect(valjean.Books()).To(ContainElement(book))
+ Expect(library.UserWithBook(ctx, book)).To(Equal(valjean))
+ }, SpecTimeout(time.Second * 5))
+ })
+
+ Context("but the book has already been checked out", func() {
+ var javert *users.User
+ BeforeEach(func(ctx SpecContext) {
+ javert = users.NewUser("Javert")
+ Expect(javert.Checkout(ctx, library, "Les Miserables")).To(Succeed())
+ })
+
+ It("tells the user", func(ctx SpecContext) {
+ err := valjean.Checkout(ctx, library, "Les Miserables")
+ Expect(err).To(MatchError("Les Miserables is currently checked out"))
+ }, SpecTimeout(time.Second * 5))
+
+ It("lets the user place a hold and get notified later", func(ctx SpecContext) {
+ Expect(valjean.Hold(ctx, library, "Les Miserables")).To(Succeed())
+ Expect(valjean.Holds(ctx)).To(ContainElement(book))
+
+ By("when Javert returns the book")
+ Expect(javert.Return(ctx, library, book)).To(Succeed())
+
+ By("it eventually informs Valjean")
+ notification := "Les Miserables is ready for pick up"
+ Eventually(ctx, valjean.Notifications).Should(ContainElement(notification))
+
+ Expect(valjean.Checkout(ctx, library, "Les Miserables")).To(Succeed())
+ Expect(valjean.Books(ctx)).To(ContainElement(book))
+ Expect(valjean.Holds(ctx)).To(BeEmpty())
+ }, SpecTimeout(time.Second * 10))
+ })
+ })
+
+ When("the library does not have the book in question", func() {
+ It("tells the reader the book is unavailable", func(ctx SpecContext) {
+ err := valjean.Checkout(ctx, library, "Les Miserables")
+ Expect(err).To(MatchError("Les Miserables is not in the library catalog"))
+ }, SpecTimeout(time.Second * 5))
+ })
+})
+```
+
+Jump to the [docs](https://onsi.github.io/ginkgo/) to learn more. It's easy to [bootstrap](https://onsi.github.io/ginkgo/#bootstrapping-a-suite) and start writing your [first specs](https://onsi.github.io/ginkgo/#adding-specs-to-a-suite).
+
+If you have a question, comment, bug report, feature request, etc. please open a [GitHub issue](https://github.com/onsi/ginkgo/issues/new), or visit the [Ginkgo Slack channel](https://app.slack.com/client/T029RQSE6/CQQ50BBNW).
+
+## Capabilities
+
+Whether writing basic unit specs, complex integration specs, or even performance specs - Ginkgo gives you an expressive Domain-Specific Language (DSL) that will be familiar to users coming from frameworks such as [Quick](https://github.com/Quick/Quick), [RSpec](https://rspec.info), [Jasmine](https://jasmine.github.io), and [Busted](https://lunarmodules.github.io/busted/). This style of testing is sometimes referred to as "Behavior-Driven Development" (BDD) though Ginkgo's utility extends beyond acceptance-level testing.
+
+With Ginkgo's DSL you can use nestable [`Describe`, `Context` and `When` container nodes](https://onsi.github.io/ginkgo/#organizing-specs-with-container-nodes) to help you organize your specs. [`BeforeEach` and `AfterEach` setup nodes](https://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach) for setup and cleanup. [`It` and `Specify` subject nodes](https://onsi.github.io/ginkgo/#spec-subjects-it) that hold your assertions. [`BeforeSuite` and `AfterSuite` nodes](https://onsi.github.io/ginkgo/#suite-setup-and-cleanup-beforesuite-and-aftersuite) to prep for and cleanup after a suite... and [much more!](https://onsi.github.io/ginkgo/#writing-specs).
+
+At runtime, Ginkgo can run your specs in reproducibly [random order](https://onsi.github.io/ginkgo/#spec-randomization) and has sophisticated support for [spec parallelization](https://onsi.github.io/ginkgo/#spec-parallelization). In fact, running specs in parallel is as easy as
+
+```bash
+ginkgo -p
+```
+
+By following [established patterns for writing parallel specs](https://onsi.github.io/ginkgo/#patterns-for-parallel-integration-specs) you can build even large, complex integration suites that parallelize cleanly and run performantly. And you don't have to worry about your spec suite hanging or leaving a mess behind - Ginkgo provides a per-node `context.Context` and the capability to interrupt the spec after a set period of time - and then clean up.
+
+As your suites grow Ginkgo helps you keep your specs organized with [labels](https://onsi.github.io/ginkgo/#spec-labels) and lets you easily run [subsets of specs](https://onsi.github.io/ginkgo/#filtering-specs), either [programmatically](https://onsi.github.io/ginkgo/#focused-specs) or on the [command line](https://onsi.github.io/ginkgo/#combining-filters). And Ginkgo's reporting infrastructure generates machine-readable output in a [variety of formats](https://onsi.github.io/ginkgo/#generating-machine-readable-reports) _and_ allows you to build your own [custom reporting infrastructure](https://onsi.github.io/ginkgo/#generating-reports-programmatically).
+
+Ginkgo ships with `ginkgo`, a [command line tool](https://onsi.github.io/ginkgo/#ginkgo-cli-overview) with support for generating, running, filtering, and profiling Ginkgo suites. You can even have Ginkgo automatically run your specs when it detects a change with `ginkgo watch`, enabling rapid feedback loops during test-driven development.
+
+And that's just Ginkgo! [Gomega](https://onsi.github.io/gomega/) brings a rich, mature, family of [assertions and matchers](https://onsi.github.io/gomega/#provided-matchers) to your suites. With Gomega you can easily mix [synchronous and asynchronous assertions](https://onsi.github.io/ginkgo/#patterns-for-asynchronous-testing) in your specs. You can even build your own set of expressive domain-specific matchers quickly and easily by composing Gomega's [existing building blocks](https://onsi.github.io/ginkgo/#building-custom-matchers).
+
+Happy Testing!
+
+## License
+
+Ginkgo is MIT-Licensed
+
+## Contributing
+
+See [CONTRIBUTING.md](CONTRIBUTING.md)
diff --git a/vendor/github.com/onsi/ginkgo/v2/RELEASING.md b/vendor/github.com/onsi/ginkgo/v2/RELEASING.md
new file mode 100644
index 000000000..363815d7c
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/RELEASING.md
@@ -0,0 +1,23 @@
+A Ginkgo release is a tagged git sha and a GitHub release. To cut a release:
+
+1. Ensure CHANGELOG.md is up to date.
+ - Use
+ ```bash
+ LAST_VERSION=$(git tag --sort=version:refname | tail -n1)
+ CHANGES=$(git log --pretty=format:'- %s [%h]' HEAD...$LAST_VERSION)
+ echo -e "## NEXT\n\n$CHANGES\n\n### Features\n\n### Fixes\n\n### Maintenance\n\n$(cat CHANGELOG.md)" > CHANGELOG.md
+ ```
+ to update the changelog
+ - Categorize the changes into
+ - Breaking Changes (requires a major version)
+ - New Features (minor version)
+ - Fixes (fix version)
+ - Maintenance (which in general should not be mentioned in `CHANGELOG.md` as they have no user impact)
+1. Update `VERSION` in `types/version.go`
+1. Commit, push, and release:
+ ```
+ git commit -m "vM.m.p"
+ git push
+ gh release create "vM.m.p"
+ git fetch --tags origin master
+ ```
\ No newline at end of file
diff --git a/vendor/github.com/onsi/ginkgo/v2/config/deprecated.go b/vendor/github.com/onsi/ginkgo/v2/config/deprecated.go
new file mode 100644
index 000000000..a61021d08
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/config/deprecated.go
@@ -0,0 +1,69 @@
+package config
+
+// GinkgoConfigType has been deprecated and its equivalent now lives in
+// the types package. You can no longer access Ginkgo configuration from the config
+// package. Instead use the DSL's GinkgoConfiguration() function to get copies of the
+// current configuration
+//
+// GinkgoConfigType is still here so custom V1 reporters do not result in a compilation error
+// It will be removed in a future minor release of Ginkgo
+type GinkgoConfigType = DeprecatedGinkgoConfigType
+type DeprecatedGinkgoConfigType struct {
+ RandomSeed int64
+ RandomizeAllSpecs bool
+ RegexScansFilePath bool
+ FocusStrings []string
+ SkipStrings []string
+ SkipMeasurements bool
+ FailOnPending bool
+ FailFast bool
+ FlakeAttempts int
+ EmitSpecProgress bool
+ DryRun bool
+ DebugParallel bool
+
+ ParallelNode int
+ ParallelTotal int
+ SyncHost string
+ StreamHost string
+}
+
+// DefaultReporterConfigType has been deprecated and its equivalent now lives in
+// the types package. You can no longer access Ginkgo configuration from the config
+// package. Instead use the DSL's GinkgoConfiguration() function to get copies of the
+// current configuration
+//
+// DefaultReporterConfigType is still here so custom V1 reporters do not result in a compilation error
+// It will be removed in a future minor release of Ginkgo
+type DefaultReporterConfigType = DeprecatedDefaultReporterConfigType
+type DeprecatedDefaultReporterConfigType struct {
+ NoColor bool
+ SlowSpecThreshold float64
+ NoisyPendings bool
+ NoisySkippings bool
+ Succinct bool
+ Verbose bool
+ FullTrace bool
+ ReportPassed bool
+ ReportFile string
+}
+
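+// For example, code that previously read config.GinkgoConfig.RandomSeed can use the
+// DSL instead (a minimal sketch; RandomSeed and NoColor are fields on the returned configs):
+//
+//	suiteConfig, reporterConfig := ginkgo.GinkgoConfiguration()
+//	seed := suiteConfig.RandomSeed
+//	noColor := reporterConfig.NoColor
+//	_, _ = seed, noColor
+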
+// Sadly there is no way to gracefully deprecate access to these global config variables.
+// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method
+// These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails
+type GinkgoConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead struct{}
+
+// Sadly there is no way to gracefully deprecate access to these global config variables.
+// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method
+// These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails
+var GinkgoConfig = GinkgoConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead{}
+
+// Sadly there is no way to gracefully deprecate access to these global config variables.
+// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method
+// These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails
+type DefaultReporterConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead struct{}
+
+// Sadly there is no way to gracefully deprecate access to these global config variables.
+// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method
+// These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails
+var DefaultReporterConfig = DefaultReporterConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead{}
diff --git a/vendor/github.com/onsi/ginkgo/v2/core_dsl.go b/vendor/github.com/onsi/ginkgo/v2/core_dsl.go
new file mode 100644
index 000000000..a3e8237e9
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/core_dsl.go
@@ -0,0 +1,847 @@
+/*
+Ginkgo is a testing framework for Go designed to help you write expressive tests.
+https://github.com/onsi/ginkgo
+MIT-Licensed
+
+The godoc documentation outlines Ginkgo's API. Since Ginkgo is a Domain-Specific Language it is important to
+build a mental model for Ginkgo - the narrative documentation at https://onsi.github.io/ginkgo/ is designed to help you do that.
+You should start there - even a brief skim will be helpful. At minimum you should skim through the https://onsi.github.io/ginkgo/#getting-started chapter.
+
+Ginkgo is best paired with the Gomega matcher library: https://github.com/onsi/gomega
+
+You can run Ginkgo specs with go test - however we recommend using the ginkgo cli. It enables functionality
+that go test does not (especially running suites in parallel). You can learn more at https://onsi.github.io/ginkgo/#ginkgo-cli-overview
+or by running 'ginkgo help'.
+*/
+package ginkgo
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/go-logr/logr"
+ "github.com/onsi/ginkgo/v2/formatter"
+ "github.com/onsi/ginkgo/v2/internal"
+ "github.com/onsi/ginkgo/v2/internal/global"
+ "github.com/onsi/ginkgo/v2/internal/interrupt_handler"
+ "github.com/onsi/ginkgo/v2/internal/parallel_support"
+ "github.com/onsi/ginkgo/v2/reporters"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+const GINKGO_VERSION = types.VERSION
+
+var flagSet types.GinkgoFlagSet
+var deprecationTracker = types.NewDeprecationTracker()
+var suiteConfig = types.NewDefaultSuiteConfig()
+var reporterConfig = types.NewDefaultReporterConfig()
+var suiteDidRun = false
+var outputInterceptor internal.OutputInterceptor
+var client parallel_support.Client
+
+func init() {
+ var err error
+ flagSet, err = types.BuildTestSuiteFlagSet(&suiteConfig, &reporterConfig)
+ exitIfErr(err)
+ writer := internal.NewWriter(os.Stdout)
+ GinkgoWriter = writer
+ GinkgoLogr = internal.GinkgoLogrFunc(writer)
+}
+
+func exitIfErr(err error) {
+ if err != nil {
+ if outputInterceptor != nil {
+ outputInterceptor.Shutdown()
+ }
+ if client != nil {
+ client.Close()
+ }
+ fmt.Fprintln(formatter.ColorableStdErr, err.Error())
+ os.Exit(1)
+ }
+}
+
+func exitIfErrors(errors []error) {
+ if len(errors) > 0 {
+ if outputInterceptor != nil {
+ outputInterceptor.Shutdown()
+ }
+ if client != nil {
+ client.Close()
+ }
+ for _, err := range errors {
+ fmt.Fprintln(formatter.ColorableStdErr, err.Error())
+ }
+ os.Exit(1)
+ }
+}
+
+// The interface implemented by GinkgoWriter
+type GinkgoWriterInterface interface {
+ io.Writer
+
+ Print(a ...interface{})
+ Printf(format string, a ...interface{})
+ Println(a ...interface{})
+
+ TeeTo(writer io.Writer)
+ ClearTeeWriters()
+}
+
+/*
+SpecContext is the context object passed into nodes that are subject to a timeout or need to be notified of an interrupt. It implements the standard context.Context interface but also contains additional helpers to provide an extensibility point for Ginkgo. (As an example, Gomega's Eventually can use the methods defined on SpecContext to provide deeper integration with Ginkgo).
+
+You can do anything with SpecContext that you do with a typical context.Context including wrapping it with any of the context.With* methods.
+
+Ginkgo will cancel the SpecContext when a node is interrupted (e.g. by the user sending an interrupt signal) or when a node has exceeded its allowed run-time. Note, however, that even in cases where a node has a deadline, SpecContext will not return a deadline via .Deadline(). This is because Ginkgo does not use a WithDeadline() context to model node deadlines as Ginkgo needs control over the precise timing of the context cancellation to ensure it can provide an accurate progress report at the moment of cancellation.
+*/
+type SpecContext = internal.SpecContext
+
+/*
+GinkgoWriter implements a GinkgoWriterInterface and io.Writer
+
+When running in verbose mode (ginkgo -v) any writes to GinkgoWriter will be immediately printed
+to stdout. Otherwise, GinkgoWriter will buffer any writes produced during the current test and flush them to screen
+only if the current test fails.
+
+GinkgoWriter also provides convenience Print, Printf and Println methods and allows you to tee to a custom writer via GinkgoWriter.TeeTo(writer).
+Writes to GinkgoWriter are immediately sent to any registered TeeTo() writers. You can unregister all TeeTo() Writers with GinkgoWriter.ClearTeeWriters()
+
+You can learn more at https://onsi.github.io/ginkgo/#logging-output
+*/
+var GinkgoWriter GinkgoWriterInterface
+
+/*
+GinkgoLogr is a logr.Logger that writes to GinkgoWriter
+*/
+var GinkgoLogr logr.Logger
+
+// The interface by which Ginkgo receives *testing.T
+type GinkgoTestingT interface {
+ Fail()
+}
+
+/*
+GinkgoConfiguration returns the configuration of the current suite.
+
+The first return value is the SuiteConfig which controls aspects of how the suite runs,
+the second return value is the ReporterConfig which controls aspects of how Ginkgo's default
+reporter emits output.
+
+Mutating the returned configurations has no effect. To reconfigure Ginkgo programmatically you need
+to pass in your mutated copies into RunSpecs().
+
+You can learn more at https://onsi.github.io/ginkgo/#overriding-ginkgos-command-line-configuration-in-the-suite
+*/
+func GinkgoConfiguration() (types.SuiteConfig, types.ReporterConfig) {
+ return suiteConfig, reporterConfig
+}
+
+/*
+GinkgoRandomSeed returns the seed used to randomize spec execution order. It is
+useful for seeding your own pseudorandom number generators to ensure
+consistent executions from run to run, where your tests contain variability (for
+example, when selecting random spec data).
+
+You can learn more at https://onsi.github.io/ginkgo/#spec-randomization
+*/
+func GinkgoRandomSeed() int64 {
+ return suiteConfig.RandomSeed
+}
+
+/*
+GinkgoParallelProcess returns the parallel process number for the current ginkgo process
+The process number is 1-indexed. You can use GinkgoParallelProcess() to shard access to shared
+resources across your suites. You can learn more about patterns for sharding at https://onsi.github.io/ginkgo/#patterns-for-parallel-integration-specs
+
+For more on how specs are parallelized in Ginkgo, see http://onsi.github.io/ginkgo/#spec-parallelization
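+
+For example, a common sharding pattern derives a per-process resource name from the
+process number (a minimal sketch; the "myapp-test" prefix is an arbitrary example):
+
+    namespace := fmt.Sprintf("myapp-test-%d", GinkgoParallelProcess())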
+*/
+func GinkgoParallelProcess() int {
+ return suiteConfig.ParallelProcess
+}
+
+/*
+GinkgoHelper marks the function it's called in as a test helper. When a failure occurs inside a helper function, Ginkgo will skip the helper when analyzing the stack trace to identify where the failure occurred.
+
+This is an alternative, simpler, mechanism to passing in a skip offset when calling Fail or using Gomega.
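+
+For example (a minimal sketch; expectHealthy and the Server type are hypothetical):
+
+    func expectHealthy(s *Server) {
+        GinkgoHelper()
+        // With GinkgoHelper(), a failure here is reported at expectHealthy's call site.
+        Expect(s.Healthy()).To(BeTrue())
+    }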
+*/
+func GinkgoHelper() {
+ types.MarkAsHelper(1)
+}
+
+/*
+GinkgoLabelFilter() returns the label filter configured for this suite via `--label-filter`.
+
+You can use this to manually check if a set of labels would satisfy the filter via:
+
+ if (Label("cat", "dog").MatchesLabelFilter(GinkgoLabelFilter())) {
+ //...
+ }
+*/
+func GinkgoLabelFilter() string {
+ suiteConfig, _ := GinkgoConfiguration()
+ return suiteConfig.LabelFilter
+}
+
+/*
+PauseOutputInterception() pauses Ginkgo's output interception. This is only relevant
+when running in parallel and output to stdout/stderr is being intercepted. You generally
+don't need to call this function - however there are cases when Ginkgo's output interception
+mechanisms can interfere with external processes launched by the test process.
+
+In particular, if an external process is launched that has cmd.Stdout/cmd.Stderr set to os.Stdout/os.Stderr
+then Ginkgo's output interceptor will hang. To circumvent this, set cmd.Stdout/cmd.Stderr to GinkgoWriter.
+If, for some reason, you aren't able to do that, you can PauseOutputInterception() before starting the process
+then ResumeOutputInterception() after starting it.
+
+Note that PauseOutputInterception() does not cause stdout writes to print to the console -
+this simply stops intercepting and storing stdout writes to an internal buffer.
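+
+A minimal sketch of the pause/resume fallback (the external command is hypothetical):
+
+    PauseOutputInterception()
+    cmd := exec.Command("long-running-helper")
+    cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
+    Expect(cmd.Start()).To(Succeed())
+    ResumeOutputInterception()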
+*/
+func PauseOutputInterception() {
+ if outputInterceptor == nil {
+ return
+ }
+ outputInterceptor.PauseIntercepting()
+}
+
+// ResumeOutputInterception() - see docs for PauseOutputInterception()
+func ResumeOutputInterception() {
+ if outputInterceptor == nil {
+ return
+ }
+ outputInterceptor.ResumeIntercepting()
+}
+
+/*
+RunSpecs is the entry point for the Ginkgo spec runner.
+
+You must call this within a Golang testing TestX(t *testing.T) function.
+If you bootstrapped your suite with "ginkgo bootstrap" this is already
+done for you.
+
+Ginkgo is typically configured via command-line flags. This configuration
+can be overridden, however, and passed into RunSpecs as optional arguments:
+
+ func TestMySuite(t *testing.T) {
+ RegisterFailHandler(gomega.Fail)
+ // fetch the current config
+ suiteConfig, reporterConfig := GinkgoConfiguration()
+ // adjust it
+ suiteConfig.SkipStrings = []string{"NEVER-RUN"}
+ reporterConfig.FullTrace = true
+ // pass it in to RunSpecs
+ RunSpecs(t, "My Suite", suiteConfig, reporterConfig)
+ }
+
+Note that some configuration changes can lead to undefined behavior. For example,
+you should not change ParallelProcess or ParallelTotal as the Ginkgo CLI is responsible
+for setting these and orchestrating parallel specs across the parallel processes. See http://onsi.github.io/ginkgo/#spec-parallelization
+for more on how specs are parallelized in Ginkgo.
+
+You can also pass suite-level Label() decorators to RunSpecs. The passed-in labels will apply to all specs in the suite.
+*/
+func RunSpecs(t GinkgoTestingT, description string, args ...interface{}) bool {
+ if suiteDidRun {
+ exitIfErr(types.GinkgoErrors.RerunningSuite())
+ }
+ suiteDidRun = true
+ err := global.PushClone()
+ if err != nil {
+ exitIfErr(err)
+ }
+ defer global.PopClone()
+
+ suiteLabels := extractSuiteConfiguration(args)
+
+ var reporter reporters.Reporter
+ if suiteConfig.ParallelTotal == 1 {
+ reporter = reporters.NewDefaultReporter(reporterConfig, formatter.ColorableStdOut)
+ outputInterceptor = internal.NoopOutputInterceptor{}
+ client = nil
+ } else {
+ reporter = reporters.NoopReporter{}
+ switch strings.ToLower(suiteConfig.OutputInterceptorMode) {
+ case "swap":
+ outputInterceptor = internal.NewOSGlobalReassigningOutputInterceptor()
+ case "none":
+ outputInterceptor = internal.NoopOutputInterceptor{}
+ default:
+ outputInterceptor = internal.NewOutputInterceptor()
+ }
+ client = parallel_support.NewClient(suiteConfig.ParallelHost)
+ if !client.Connect() {
+ client = nil
+ exitIfErr(types.GinkgoErrors.UnreachableParallelHost(suiteConfig.ParallelHost))
+ }
+ defer client.Close()
+ }
+
+ writer := GinkgoWriter.(*internal.Writer)
+ if reporterConfig.Verbosity().GTE(types.VerbosityLevelVerbose) && suiteConfig.ParallelTotal == 1 {
+ writer.SetMode(internal.WriterModeStreamAndBuffer)
+ } else {
+ writer.SetMode(internal.WriterModeBufferOnly)
+ }
+
+ if reporterConfig.WillGenerateReport() {
+ registerReportAfterSuiteNodeForAutogeneratedReports(reporterConfig)
+ }
+
+ err = global.Suite.BuildTree()
+ exitIfErr(err)
+ suitePath, err := getwd()
+ exitIfErr(err)
+ suitePath, err = filepath.Abs(suitePath)
+ exitIfErr(err)
+
+ passed, hasFocusedTests := global.Suite.Run(description, suiteLabels, suitePath, global.Failer, reporter, writer, outputInterceptor, interrupt_handler.NewInterruptHandler(client), client, internal.RegisterForProgressSignal, suiteConfig)
+ outputInterceptor.Shutdown()
+
+ flagSet.ValidateDeprecations(deprecationTracker)
+ if deprecationTracker.DidTrackDeprecations() {
+ fmt.Fprintln(formatter.ColorableStdErr, deprecationTracker.DeprecationsReport())
+ }
+
+ if !passed {
+ t.Fail()
+ }
+
+ if passed && hasFocusedTests && strings.TrimSpace(os.Getenv("GINKGO_EDITOR_INTEGRATION")) == "" {
+ fmt.Println("PASS | FOCUSED")
+ os.Exit(types.GINKGO_FOCUS_EXIT_CODE)
+ }
+ return passed
+}
+
+func extractSuiteConfiguration(args []interface{}) Labels {
+ suiteLabels := Labels{}
+ configErrors := []error{}
+ for _, arg := range args {
+ switch arg := arg.(type) {
+ case types.SuiteConfig:
+ suiteConfig = arg
+ case types.ReporterConfig:
+ reporterConfig = arg
+ case Labels:
+ suiteLabels = append(suiteLabels, arg...)
+ default:
+ configErrors = append(configErrors, types.GinkgoErrors.UnknownTypePassedToRunSpecs(arg))
+ }
+ }
+ exitIfErrors(configErrors)
+
+ configErrors = types.VetConfig(flagSet, suiteConfig, reporterConfig)
+ if len(configErrors) > 0 {
+ fmt.Fprintf(formatter.ColorableStdErr, formatter.F("{{red}}Ginkgo detected configuration issues:{{/}}\n"))
+ for _, err := range configErrors {
+ fmt.Fprintf(formatter.ColorableStdErr, err.Error())
+ }
+ os.Exit(1)
+ }
+
+ return suiteLabels
+}
+
+func getwd() (string, error) {
+ if !strings.EqualFold(os.Getenv("GINKGO_PRESERVE_CACHE"), "true") {
+ // Getwd calls os.Getenv("PWD"), which breaks test caching if the cache
+ // is shared between two different directories with the same test code.
+ return os.Getwd()
+ }
+ return "", nil
+}
+
+/*
+PreviewSpecs walks the testing tree and produces a report without actually invoking the specs.
+See http://onsi.github.io/ginkgo/#previewing-specs for more information.
+*/
+func PreviewSpecs(description string, args ...any) Report {
+ err := global.PushClone()
+ if err != nil {
+ exitIfErr(err)
+ }
+ defer global.PopClone()
+
+ suiteLabels := extractSuiteConfiguration(args)
+ priorDryRun, priorParallelTotal, priorParallelProcess := suiteConfig.DryRun, suiteConfig.ParallelTotal, suiteConfig.ParallelProcess
+ suiteConfig.DryRun, suiteConfig.ParallelTotal, suiteConfig.ParallelProcess = true, 1, 1
+ defer func() {
+ suiteConfig.DryRun, suiteConfig.ParallelTotal, suiteConfig.ParallelProcess = priorDryRun, priorParallelTotal, priorParallelProcess
+ }()
+ reporter := reporters.NoopReporter{}
+ outputInterceptor = internal.NoopOutputInterceptor{}
+ client = nil
+ writer := GinkgoWriter.(*internal.Writer)
+
+ err = global.Suite.BuildTree()
+ exitIfErr(err)
+ suitePath, err := getwd()
+ exitIfErr(err)
+ suitePath, err = filepath.Abs(suitePath)
+ exitIfErr(err)
+
+ global.Suite.Run(description, suiteLabels, suitePath, global.Failer, reporter, writer, outputInterceptor, interrupt_handler.NewInterruptHandler(client), client, internal.RegisterForProgressSignal, suiteConfig)
+
+ return global.Suite.GetPreviewReport()
+}
+
+/*
+Skip instructs Ginkgo to skip the current spec
+
+You can call Skip in any Setup or Subject node closure.
+
+For more on how to filter specs in Ginkgo see https://onsi.github.io/ginkgo/#filtering-specs
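+
+For example (a minimal sketch; the environment variable is an arbitrary gate):
+
+    BeforeEach(func() {
+        if os.Getenv("RUN_SLOW_SPECS") == "" {
+            Skip("set RUN_SLOW_SPECS to run these specs")
+        }
+    })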
+*/
+func Skip(message string, callerSkip ...int) {
+ skip := 0
+ if len(callerSkip) > 0 {
+ skip = callerSkip[0]
+ }
+ cl := types.NewCodeLocationWithStackTrace(skip + 1)
+ global.Failer.Skip(message, cl)
+ panic(types.GinkgoErrors.UncaughtGinkgoPanic(cl))
+}
+
+/*
+Fail notifies Ginkgo that the current spec has failed. (Gomega will call Fail for you automatically when an assertion fails.)
+
+Under the hood, Fail panics to end execution of the current spec. Ginkgo will catch this panic and proceed with
+the subsequent spec. If you call Fail, or make an assertion, within a goroutine launched by your spec you must
+add defer GinkgoRecover() to the goroutine to catch the panic emitted by Fail.
+
+You can call Fail in any Setup or Subject node closure.
+
+You can learn more about how Ginkgo manages failures here: https://onsi.github.io/ginkgo/#mental-model-how-ginkgo-handles-failure
+*/
+func Fail(message string, callerSkip ...int) {
+ skip := 0
+ if len(callerSkip) > 0 {
+ skip = callerSkip[0]
+ }
+
+ cl := types.NewCodeLocationWithStackTrace(skip + 1)
+ global.Failer.Fail(message, cl)
+ panic(types.GinkgoErrors.UncaughtGinkgoPanic(cl))
+}
+
+/*
+AbortSuite instructs Ginkgo to fail the current spec and skip all subsequent specs, thereby aborting the suite.
+
+You can call AbortSuite in any Setup or Subject node closure.
+
+You can learn more about how Ginkgo handles suite interruptions here: https://onsi.github.io/ginkgo/#interrupting-aborting-and-timing-out-suites
+*/
+func AbortSuite(message string, callerSkip ...int) {
+ skip := 0
+ if len(callerSkip) > 0 {
+ skip = callerSkip[0]
+ }
+
+ cl := types.NewCodeLocationWithStackTrace(skip + 1)
+ global.Failer.AbortSuite(message, cl)
+ panic(types.GinkgoErrors.UncaughtGinkgoPanic(cl))
+}
+
+/*
+ignorablePanic is used by Gomega to signal to GinkgoRecover that Gomega is handling
+the error associated with this panic. It is used when Eventually/Consistently are passed a func(g Gomega) and the resulting function launches a goroutine that makes a failed assertion. That failed assertion is registered by Gomega and then panics. Ordinarily the panic is captured by Gomega. In the case of a goroutine Gomega can't capture the panic - so we piggyback on GinkgoRecover so users have a single defer GinkgoRecover() pattern to follow. To do that we need to tell Ginkgo to ignore this panic and not register it as a panic on the global Failer.
+*/
+type ignorablePanic interface{ GinkgoRecoverShouldIgnoreThisPanic() }
+
+/*
+GinkgoRecover should be deferred at the top of any spawned goroutine that (may) call `Fail`
+Since Gomega assertions call fail, you should throw a `defer GinkgoRecover()` at the top of any goroutine that
+calls out to Gomega
+
+Here's why: Ginkgo's `Fail` method records the failure and then panics to prevent
+further assertions from running. This panic must be recovered. Normally, Ginkgo recovers the panic for you,
+however if a panic originates on a goroutine *launched* from one of your specs there's no
+way for Ginkgo to rescue the panic. To do this, you must remember to `defer GinkgoRecover()` at the top of such a goroutine.
+
+You can learn more about how Ginkgo manages failures here: https://onsi.github.io/ginkgo/#mental-model-how-ginkgo-handles-failure
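+
+For example (a minimal sketch; doWork is a hypothetical helper returning an error):
+
+    It("runs assertions in a goroutine", func() {
+        done := make(chan struct{})
+        go func() {
+            defer GinkgoRecover()
+            Expect(doWork()).To(Succeed())
+            close(done)
+        }()
+        Eventually(done).Should(BeClosed())
+    })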
+*/
+func GinkgoRecover() {
+ e := recover()
+ if e != nil {
+ if _, ok := e.(ignorablePanic); ok {
+ return
+ }
+ global.Failer.Panic(types.NewCodeLocationWithStackTrace(1), e)
+ }
+}
+
+// pushNode is used by the various test construction DSL methods to push nodes onto the suite
+// it handles returned errors, emits a detailed error message to help the user learn what they may have done wrong, then exits
+func pushNode(node internal.Node, errors []error) bool {
+ exitIfErrors(errors)
+ exitIfErr(global.Suite.PushNode(node))
+ return true
+}
+
+/*
+Describe nodes are Container nodes that allow you to organize your specs. A Describe node's closure can contain any number of
+Setup nodes (e.g. BeforeEach, AfterEach, JustBeforeEach), and Subject nodes (i.e. It).
+
+Context and When nodes are aliases for Describe - use whichever gives your suite a better narrative flow. It is idiomatic
+to Describe the behavior of an object or function and, within that Describe, outline a number of Contexts and Whens.
+
+You can learn more at https://onsi.github.io/ginkgo/#organizing-specs-with-container-nodes
+In addition, container nodes can be decorated with a variety of decorators. You can learn more here: https://onsi.github.io/ginkgo/#decorator-reference
+*/
+func Describe(text string, args ...interface{}) bool {
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, text, args...))
+}
+
+/*
+FDescribe focuses specs within the Describe block.
+*/
+func FDescribe(text string, args ...interface{}) bool {
+ args = append(args, internal.Focus)
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, text, args...))
+}
+
+/*
+PDescribe marks specs within the Describe block as pending.
+*/
+func PDescribe(text string, args ...interface{}) bool {
+ args = append(args, internal.Pending)
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, text, args...))
+}
+
+/*
+XDescribe marks specs within the Describe block as pending.
+
+XDescribe is an alias for PDescribe
+*/
+var XDescribe = PDescribe
+
+/* Context is an alias for Describe - it generates the exact same kind of Container node */
+var Context, FContext, PContext, XContext = Describe, FDescribe, PDescribe, XDescribe
+
+/* When is an alias for Describe - it generates the exact same kind of Container node */
+func When(text string, args ...interface{}) bool {
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, "when "+text, args...))
+}
+
+/* FWhen focuses specs within the When block - it generates the exact same kind of Container node */
+func FWhen(text string, args ...interface{}) bool {
+ args = append(args, internal.Focus)
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, "when "+text, args...))
+}
+
+/* PWhen marks specs within the When block as pending - it generates the exact same kind of Container node */
+func PWhen(text string, args ...interface{}) bool {
+ args = append(args, internal.Pending)
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, "when "+text, args...))
+}
+
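+/* XWhen is an alias for PWhen - it marks specs within the When block as pending */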
+var XWhen = PWhen
+
+/*
+It nodes are Subject nodes that contain your spec code and assertions.
+
+Each It node corresponds to an individual Ginkgo spec. You cannot nest any other Ginkgo nodes within an It node's closure.
+
+You can pass It nodes bare functions (func() {}) or functions that receive a SpecContext or context.Context: func(ctx SpecContext) {} and func (ctx context.Context) {}. If the function takes a context then the It is deemed interruptible and Ginkgo will cancel the context in the event of a timeout (configured via the SpecTimeout() or NodeTimeout() decorators) or of an interrupt signal.
+
+You can learn more at https://onsi.github.io/ginkgo/#spec-subjects-it
+In addition, subject nodes can be decorated with a variety of decorators. You can learn more here: https://onsi.github.io/ginkgo/#decorator-reference
+*/
+func It(text string, args ...interface{}) bool {
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, text, args...))
+}
+
+/*
+FIt allows you to focus an individual It.
+*/
+func FIt(text string, args ...interface{}) bool {
+ args = append(args, internal.Focus)
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, text, args...))
+}
+
+/*
+PIt allows you to mark an individual It as pending.
+*/
+func PIt(text string, args ...interface{}) bool {
+ args = append(args, internal.Pending)
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, text, args...))
+}
+
+/*
+XIt allows you to mark an individual It as pending.
+
+XIt is an alias for PIt
+*/
+var XIt = PIt
+
+/*
+Specify is an alias for It - it can allow for more natural wording in some contexts.
+*/
+var Specify, FSpecify, PSpecify, XSpecify = It, FIt, PIt, XIt
+
+/*
+By allows you to better document complex Specs.
+
+Generally you should try to keep your Its short and to the point. This is not always possible, however,
+especially in the context of integration tests that capture complex or lengthy workflows.
+
+By allows you to document such flows. By may be called within a Setup or Subject node (It, BeforeEach, etc...)
+and will simply log the passed in text to the GinkgoWriter. If By is handed a function it will immediately run the function.
+
+By will also generate and attach a ReportEntry to the spec. This will ensure that By annotations appear in Ginkgo's machine-readable reports.
+
+Note that By does not generate a new Ginkgo node - rather it is simply syntactic sugar around GinkgoWriter and AddReportEntry
+You can learn more about By here: https://onsi.github.io/ginkgo/#documenting-complex-specs-by
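+
+For example (a minimal sketch; the library and user helpers are hypothetical):
+
+    It("checks out a book", func() {
+        By("creating a user")
+        user := users.NewUser("Jean Valjean")
+
+        By("lending the book", func() {
+            Expect(user.Checkout(library, "Les Miserables")).To(Succeed())
+        })
+    })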
+*/
+func By(text string, callback ...func()) {
+ exitIfErr(global.Suite.By(text, callback...))
+}
+
+/*
+BeforeSuite nodes are suite-level Setup nodes that run just once before any specs are run.
+When running in parallel, each parallel process will call BeforeSuite.
+
+You may only register *one* BeforeSuite handler per test suite. You typically do so in your bootstrap file at the top level.
+
+BeforeSuite can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body.
+
+You cannot nest any other Ginkgo nodes within a BeforeSuite node's closure.
+You can learn more here: https://onsi.github.io/ginkgo/#suite-setup-and-cleanup-beforesuite-and-aftersuite
+*/
+func BeforeSuite(body interface{}, args ...interface{}) bool {
+ combinedArgs := []interface{}{body}
+ combinedArgs = append(combinedArgs, args...)
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeBeforeSuite, "", combinedArgs...))
+}
+
+/*
+AfterSuite nodes are suite-level Setup nodes run after all specs have finished - regardless of whether specs have passed or failed.
+AfterSuite node closures always run, even if Ginkgo receives an interrupt signal (^C), in order to ensure cleanup occurs.
+
+When running in parallel, each parallel process will call AfterSuite.
+
+You may only register *one* AfterSuite handler per test suite. You typically do so in your bootstrap file at the top level.
+
+AfterSuite can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body.
+
+You cannot nest any other Ginkgo nodes within an AfterSuite node's closure.
+You can learn more here: https://onsi.github.io/ginkgo/#suite-setup-and-cleanup-beforesuite-and-aftersuite
+*/
+func AfterSuite(body interface{}, args ...interface{}) bool {
+ combinedArgs := []interface{}{body}
+ combinedArgs = append(combinedArgs, args...)
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeAfterSuite, "", combinedArgs...))
+}
+
+/*
+SynchronizedBeforeSuite nodes allow you to perform some of the suite setup just once - on parallel process #1 - and then pass information
+from that setup to the rest of the suite setup on all processes. This is useful for performing expensive or singleton setup once, then passing
+information from that setup to all parallel processes.
+
+SynchronizedBeforeSuite accomplishes this by taking *two* function arguments and passing data between them.
+The first function is only run on parallel process #1. The second is run on all processes, but *only* after the first function completes successfully. The functions have the following signatures:
+
+The first function (which only runs on process #1) can have any of the following signatures:
+
+ func()
+ func(ctx context.Context)
+ func(ctx SpecContext)
+ func() []byte
+ func(ctx context.Context) []byte
+ func(ctx SpecContext) []byte
+
+The byte array returned by the first function (if present) is then passed to the second function, which can have any of the following signatures:
+
+ func()
+ func(ctx context.Context)
+ func(ctx SpecContext)
+ func(data []byte)
+ func(ctx context.Context, data []byte)
+ func(ctx SpecContext, data []byte)
+
+If either function receives a context.Context/SpecContext it is considered interruptible.
+
+You cannot nest any other Ginkgo nodes within a SynchronizedBeforeSuite node's closure.
+You can learn more, and see some examples, here: https://onsi.github.io/ginkgo/#parallel-suite-setup-and-cleanup-synchronizedbeforesuite-and-synchronizedaftersuite
+*/
+func SynchronizedBeforeSuite(process1Body interface{}, allProcessBody interface{}, args ...interface{}) bool {
+ combinedArgs := []interface{}{process1Body, allProcessBody}
+ combinedArgs = append(combinedArgs, args...)
+
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeSynchronizedBeforeSuite, "", combinedArgs...))
+}
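+
+// A minimal illustrative sketch, assuming a hypothetical singleton database:
+// process #1 boots the database and returns its address as bytes; every
+// process (including #1) then connects to that address.
+//
+//    var dbClient *Client
+//
+//    var _ = SynchronizedBeforeSuite(func() []byte {
+//        addr := startDatabase() // hypothetical helper; runs on process #1 only
+//        return []byte(addr)
+//    }, func(data []byte) {
+//        dbClient = MustConnect(string(data)) // runs on every process
+//    })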
+
+/*
+SynchronizedAfterSuite nodes complement the SynchronizedBeforeSuite nodes in solving the problem of splitting clean up into a piece that runs on all processes
+and a piece that must only run once - on process #1.
+
+SynchronizedAfterSuite accomplishes this by taking *two* function arguments. The first runs on all processes. The second runs only on parallel process #1
+and *only* after all other processes have finished and exited. This ensures that process #1, and any resources it is managing, remain alive until
+all other processes are finished. These two functions can be bare functions (func()) or interruptible (func(context.Context)/func(SpecContext)).
+
+Note that you can also use DeferCleanup() in SynchronizedBeforeSuite to accomplish similar results.
+
+You cannot nest any other Ginkgo nodes within a SynchronizedAfterSuite node's closure.
+You can learn more, and see some examples, here: https://onsi.github.io/ginkgo/#parallel-suite-setup-and-cleanup-synchronizedbeforesuite-and-synchronizedaftersuite
+*/
+func SynchronizedAfterSuite(allProcessBody interface{}, process1Body interface{}, args ...interface{}) bool {
+ combinedArgs := []interface{}{allProcessBody, process1Body}
+ combinedArgs = append(combinedArgs, args...)
+
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeSynchronizedAfterSuite, "", combinedArgs...))
+}
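+
+// A minimal illustrative sketch mirroring the SynchronizedBeforeSuite example
+// above: every process closes its client first; process #1 stops the shared
+// database only after all other processes have exited.
+//
+//    var _ = SynchronizedAfterSuite(func() {
+//        dbClient.Close() // runs on every process
+//    }, func() {
+//        stopDatabase() // hypothetical helper; runs last, on process #1 only
+//    })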
+
+/*
+BeforeEach nodes are Setup nodes whose closures run before It node closures. When multiple BeforeEach nodes
+are defined in nested Container nodes the outermost BeforeEach node closures are run first.
+
+BeforeEach can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body.
+
+You cannot nest any other Ginkgo nodes within a BeforeEach node's closure.
+You can learn more here: https://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach
+*/
+func BeforeEach(args ...interface{}) bool {
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeBeforeEach, "", args...))
+}
+
+/*
+JustBeforeEach nodes are similar to BeforeEach nodes, however they are guaranteed to run *after* all BeforeEach node closures - just before the It node closure.
+This can allow you to separate configuration from creation of resources for a spec.
+
+JustBeforeEach can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body.
+
+You cannot nest any other Ginkgo nodes within a JustBeforeEach node's closure.
+You can learn more and see some examples here: https://onsi.github.io/ginkgo/#separating-creation-and-configuration-justbeforeeach
+*/
+func JustBeforeEach(args ...interface{}) bool {
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeJustBeforeEach, "", args...))
+}
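+
+// A minimal illustrative sketch of the configure-then-create pattern (the
+// Config and Server types are hypothetical): BeforeEach closures, including
+// ones in nested containers, tweak the configuration, and the JustBeforeEach
+// closure builds the resource last.
+//
+//    var cfg Config
+//    var server *Server
+//
+//    var _ = BeforeEach(func() {
+//        cfg = DefaultConfig() // nested containers may mutate cfg further
+//    })
+//
+//    var _ = JustBeforeEach(func() {
+//        server = NewServer(cfg) // runs after every BeforeEach closure
+//    })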
+
+/*
+AfterEach nodes are Setup nodes whose closures run after It node closures. When multiple AfterEach nodes
+are defined in nested Container nodes the innermost AfterEach node closures are run first.
+
+Note that you can also use DeferCleanup() in other Setup or Subject nodes to accomplish similar results.
+
+AfterEach can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body.
+
+You cannot nest any other Ginkgo nodes within an AfterEach node's closure.
+You can learn more here: https://onsi.github.io/ginkgo/#spec-cleanup-aftereach-and-defercleanup
+*/
+func AfterEach(args ...interface{}) bool {
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeAfterEach, "", args...))
+}
+
+/*
+JustAfterEach nodes are similar to AfterEach nodes, however they are guaranteed to run *before* all AfterEach node closures - just after the It node closure. This can allow you to separate diagnostics collection from teardown for a spec.
+
+JustAfterEach can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body.
+
+You cannot nest any other Ginkgo nodes within a JustAfterEach node's closure.
+You can learn more and see some examples here: https://onsi.github.io/ginkgo/#separating-diagnostics-collection-and-teardown-justaftereach
+*/
+func JustAfterEach(args ...interface{}) bool {
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeJustAfterEach, "", args...))
+}
+
+/*
+BeforeAll nodes are Setup nodes that can occur inside Ordered containers. They run just once before any specs in the Ordered container run.
+
+Multiple BeforeAll nodes can be defined in a given Ordered container however they cannot be nested inside any other container.
+
+BeforeAll can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body.
+
+You cannot nest any other Ginkgo nodes within a BeforeAll node's closure.
+You can learn more about Ordered Containers at: https://onsi.github.io/ginkgo/#ordered-containers
+And you can learn more about BeforeAll at: https://onsi.github.io/ginkgo/#setup-in-ordered-containers-beforeall-and-afterall
+*/
+func BeforeAll(args ...interface{}) bool {
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeBeforeAll, "", args...))
+}
+
+/*
+AfterAll nodes are Setup nodes that can occur inside Ordered containers. They run just once after all specs in the Ordered container have run.
+
+Multiple AfterAll nodes can be defined in a given Ordered container however they cannot be nested inside any other container.
+
+Note that you can also use DeferCleanup() in a BeforeAll node to accomplish similar behavior.
+
+AfterAll can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body.
+
+You cannot nest any other Ginkgo nodes within an AfterAll node's closure.
+You can learn more about Ordered Containers at: https://onsi.github.io/ginkgo/#ordered-containers
+And you can learn more about AfterAll at: https://onsi.github.io/ginkgo/#setup-in-ordered-containers-beforeall-and-afterall
+*/
+func AfterAll(args ...interface{}) bool {
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeAfterAll, "", args...))
+}
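+
+// A minimal illustrative sketch of BeforeAll/AfterAll inside an Ordered
+// container (the migration helpers are hypothetical):
+//
+//    var _ = Describe("migrations", Ordered, func() {
+//        BeforeAll(func() { runMigrations() })     // once, before the first spec
+//        AfterAll(func() { rollbackMigrations() }) // once, after the last spec
+//
+//        It("creates the users table", func() { /* ... */ })
+//        It("backfills existing rows", func() { /* ... */ })
+//    })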
+
+/*
+DeferCleanup can be called within any Setup or Subject node to register a cleanup callback that Ginkgo will call at the appropriate time to cleanup after the spec.
+
+DeferCleanup can be passed:
+1. A function that takes no arguments and returns no values.
+2. A function that returns multiple values. `DeferCleanup` will ignore all these return values except for the last one. If this last return value is a non-nil error, `DeferCleanup` will fail the spec.
+3. A function that takes a context.Context or SpecContext (and optionally returns multiple values). The resulting cleanup node is deemed interruptible and the passed-in context will be cancelled in the event of a timeout or interrupt.
+4. A function that takes arguments (and optionally returns multiple values) followed by a list of arguments to pass to the function.
+5. A function that takes SpecContext and a list of arguments (and optionally returns multiple values) followed by a list of arguments to pass to the function.
+
+For example:
+
+ BeforeEach(func() {
+ DeferCleanup(os.Setenv, "FOO", os.Getenv("FOO"))
+ os.Setenv("FOO", "BAR")
+ })
+
+will register a cleanup handler that restores the environment variable "FOO" to its original value (captured via os.Getenv("FOO")) after the spec runs, and then sets "FOO" to "BAR" for the current spec.
+
+Similarly:
+
+ BeforeEach(func() {
+ DeferCleanup(func(ctx SpecContext, path string) {
+ req, err := http.NewRequestWithContext(ctx, "POST", path, nil)
+ Expect(err).NotTo(HaveOccurred())
+ _, err = http.DefaultClient.Do(req)
+ Expect(err).NotTo(HaveOccurred())
+ }, "https://example.com/cleanup", NodeTimeout(time.Second*3))
+ })
+
+will register a cleanup handler that will have three seconds to successfully complete a request to the specified path. Note that we do not specify a context in the list of arguments passed to DeferCleanup - only in the signature of the function we pass in. Ginkgo will detect the requested context and supply a SpecContext when it invokes the cleanup node. If you want to pass in your own context in addition to the Ginkgo-provided SpecContext you must specify the SpecContext as the first argument (e.g. func(ctx SpecContext, otherCtx context.Context)).
+
+When DeferCleanup is called in BeforeEach, JustBeforeEach, It, AfterEach, or JustAfterEach the registered callback will be invoked when the spec completes (i.e. it will behave like an AfterEach node)
+When DeferCleanup is called in BeforeAll or AfterAll the registered callback will be invoked when the ordered container completes (i.e. it will behave like an AfterAll node)
+When DeferCleanup is called in BeforeSuite, SynchronizedBeforeSuite, AfterSuite, or SynchronizedAfterSuite the registered callback will be invoked when the suite completes (i.e. it will behave like an AfterSuite node)
+
+Note that DeferCleanup does not represent a node but rather dynamically generates the appropriate type of cleanup node based on the context in which it is called. As such you must call DeferCleanup within a Setup or Subject node, and not within a Container node.
+You can learn more about DeferCleanup here: https://onsi.github.io/ginkgo/#cleaning-up-our-cleanup-code-defercleanup
+*/
+func DeferCleanup(args ...interface{}) {
+ fail := func(message string, cl types.CodeLocation) {
+ global.Failer.Fail(message, cl)
+ }
+ pushNode(internal.NewCleanupNode(deprecationTracker, fail, args...))
+}
+
+/*
+AttachProgressReporter allows you to register a function that will be called whenever Ginkgo generates a Progress Report. The contents returned by the function will be included in the report.
+
+**This is an experimental feature and the public-facing interface may change in a future minor version of Ginkgo**
+
+Progress Reports are generated:
+- whenever the user explicitly requests one (via `SIGINFO` or `SIGUSR1`)
+- on nodes decorated with PollProgressAfter
+- on suites run with --poll-progress-after
+- whenever a test times out
+
+Ginkgo uses Progress Reports to convey the current state of the test suite, including any running goroutines. By attaching a progress reporter you are able to supplement these reports with additional information.
+
+AttachProgressReporter returns a function that can be called to detach the progress reporter.
+
+You can learn more about AttachProgressReporter here: https://onsi.github.io/ginkgo/#attaching-additional-information-to-progress-reports
+*/
+func AttachProgressReporter(reporter func() string) func() {
+ return global.Suite.AttachProgressReporter(reporter)
+}
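+
+// A minimal illustrative sketch: attach a reporter that annotates Progress
+// Reports with a hypothetical in-flight request counter, and detach it when
+// the spec completes.
+//
+//    var _ = It("streams events", func(ctx SpecContext) {
+//        detach := AttachProgressReporter(func() string {
+//            return fmt.Sprintf("in-flight requests: %d", inFlight.Load())
+//        })
+//        DeferCleanup(detach)
+//        // ... long-running work driven by ctx ...
+//    }, NodeTimeout(time.Minute))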
diff --git a/vendor/github.com/onsi/ginkgo/v2/core_dsl_patch.go b/vendor/github.com/onsi/ginkgo/v2/core_dsl_patch.go
new file mode 100644
index 000000000..bf60ceb52
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/core_dsl_patch.go
@@ -0,0 +1,33 @@
+package ginkgo
+
+import (
+ "io"
+
+ "github.com/onsi/ginkgo/v2/internal"
+ "github.com/onsi/ginkgo/v2/internal/global"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+func AppendSpecText(test *internal.Spec, text string) {
+ test.AppendText(text)
+}
+
+func GetSuite() *internal.Suite {
+ return global.Suite
+}
+
+func GetFailer() *internal.Failer {
+ return global.Failer
+}
+
+func NewWriter(w io.Writer) *internal.Writer {
+ return internal.NewWriter(w)
+}
+
+func GetWriter() *internal.Writer {
+ return GinkgoWriter.(*internal.Writer)
+}
+
+func SetReporterConfig(r types.ReporterConfig) {
+ reporterConfig = r
+}
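+
+// A minimal illustrative sketch of how an external harness (such as a tests
+// extension binary) might use these accessors; everything beyond the
+// accessors themselves is hypothetical:
+//
+//    suite := ginkgo.GetSuite()
+//    failer := ginkgo.GetFailer()
+//    writer := ginkgo.NewWriter(os.Stdout)
+//    ginkgo.SetReporterConfig(types.ReporterConfig{Verbose: true})
+//    // suite, failer, and writer can now be wired into a custom runner.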
diff --git a/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go b/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go
new file mode 100644
index 000000000..c65af4ce1
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go
@@ -0,0 +1,143 @@
+package ginkgo
+
+import (
+ "github.com/onsi/ginkgo/v2/internal"
+)
+
+/*
+Offset(uint) is a decorator that allows you to change the stack-frame offset used when computing the line number of the node in question.
+
+You can learn more here: https://onsi.github.io/ginkgo/#the-offset-decorator
+You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
+*/
+type Offset = internal.Offset
+
+/*
+FlakeAttempts(uint N) is a decorator that allows you to mark individual specs or spec containers as flaky. Ginkgo will run them up to `N` times until they pass.
+
+You can learn more here: https://onsi.github.io/ginkgo/#the-flakeattempts-decorator
+You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
+*/
+type FlakeAttempts = internal.FlakeAttempts
+
+/*
+MustPassRepeatedly(uint N) is a decorator that allows you to repeat the execution of individual specs or spec containers. Ginkgo will run them up to `N` times until they fail.
+
+You can learn more here: https://onsi.github.io/ginkgo/#the-mustpassrepeatedly-decorator
+You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
+*/
+type MustPassRepeatedly = internal.MustPassRepeatedly
+
+/*
+Focus is a decorator that allows you to mark a spec or container as focused. Identical to FIt and FDescribe.
+
+You can learn more here: https://onsi.github.io/ginkgo/#filtering-specs
+You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
+*/
+const Focus = internal.Focus
+
+/*
+Pending is a decorator that allows you to mark a spec or container as pending. Identical to PIt and PDescribe.
+
+You can learn more here: https://onsi.github.io/ginkgo/#filtering-specs
+You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
+*/
+const Pending = internal.Pending
+
+/*
+Serial is a decorator that allows you to mark a spec or container as serial. These specs will never run in parallel with other specs.
+Specs in ordered containers cannot be marked as serial - mark the ordered container instead.
+
+You can learn more here: https://onsi.github.io/ginkgo/#serial-specs
+You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
+*/
+const Serial = internal.Serial
+
+/*
+Ordered is a decorator that allows you to mark a container as ordered. Specs in the container will always run in the order they appear.
+They will never be randomized and they will never run in parallel with one another, though they may run in parallel with other specs.
+
+You can learn more here: https://onsi.github.io/ginkgo/#ordered-containers
+You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
+*/
+const Ordered = internal.Ordered
+
+/*
+ContinueOnFailure is a decorator that allows you to mark an Ordered container to continue running specs even if failures occur. Ordinarily an ordered container will stop running specs after the first failure occurs. Note that if a BeforeAll or a BeforeEach/JustBeforeEach annotated with OncePerOrdered fails then no specs will run, as the precondition for the Ordered container will be considered failed.
+
+ContinueOnFailure only applies to the outermost Ordered container. Attempting to place ContinueOnFailure in a nested container will result in an error.
+
+You can learn more here: https://onsi.github.io/ginkgo/#ordered-containers
+You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
+*/
+const ContinueOnFailure = internal.ContinueOnFailure
+
+/*
+OncePerOrdered is a decorator that allows you to mark outer BeforeEach, AfterEach, JustBeforeEach, and JustAfterEach setup nodes to run once
+per ordered context. Normally these setup nodes run around each individual spec, with OncePerOrdered they will run once around the set of specs in an ordered container.
+The behavior for non-Ordered containers/specs is unchanged.
+
+You can learn more here: https://onsi.github.io/ginkgo/#setup-around-ordered-containers-the-onceperordered-decorator
+You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
+*/
+const OncePerOrdered = internal.OncePerOrdered
+
+/*
+Label decorates specs with Labels. Multiple labels can be passed to Label and these can be arbitrary strings but must not include the following characters: "&|!,()/".
+Labels can be applied to container and subject nodes, but not setup nodes. You can provide multiple Labels to a given node and a spec's labels are the union of all labels in its node hierarchy.
+
+You can learn more here: https://onsi.github.io/ginkgo/#spec-labels
+You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
+*/
+func Label(labels ...string) Labels {
+ return Labels(labels)
+}
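+
+// A minimal illustrative sketch of composing decorators (the spec bodies are
+// hypothetical): labels union across the hierarchy, so the It below carries
+// both "network" and "slow".
+//
+//    var _ = Describe("ingress", Label("network"), func() {
+//        It("survives restarts", Serial, FlakeAttempts(3), Label("slow"), func() {
+//            // ...
+//        })
+//    })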
+
+/*
+Labels are the type for spec Label decorators. Use Label(...) to construct Labels.
+You can learn more here: https://onsi.github.io/ginkgo/#spec-labels
+*/
+type Labels = internal.Labels
+
+/*
+PollProgressAfter allows you to override the configured value for --poll-progress-after for a particular node.
+
+Ginkgo will start emitting node progress if the node is still running after a duration of PollProgressAfter. This allows you to get quicker feedback about the state of a long-running spec.
+*/
+type PollProgressAfter = internal.PollProgressAfter
+
+/*
+PollProgressInterval allows you to override the configured value for --poll-progress-interval for a particular node.
+
+Once a node has been running for longer than PollProgressAfter Ginkgo will emit node progress periodically at an interval of PollProgressInterval.
+*/
+type PollProgressInterval = internal.PollProgressInterval
+
+/*
+NodeTimeout allows you to specify a timeout for an individual node. The node cannot be a container and must be interruptible (i.e. it must be passed a function that accepts a SpecContext or context.Context).
+
+If the node does not exit within the specified NodeTimeout its context will be cancelled. The node will then have a period of time controlled by the GracePeriod decorator (or global --grace-period command-line argument) to exit. If the node does not exit within GracePeriod Ginkgo will leak the node and proceed to any clean-up nodes associated with the current spec.
+*/
+type NodeTimeout = internal.NodeTimeout
+
+/*
+SpecTimeout allows you to specify a timeout for an individual spec. SpecTimeout can only decorate interruptible It nodes.
+
+All nodes associated with the It node will need to complete before the SpecTimeout has elapsed. Individual nodes (e.g. BeforeEach) may be decorated with different NodeTimeouts - but these can only serve to provide a more stringent deadline for the node in question; they cannot extend the deadline past the SpecTimeout.
+
+If the spec does not complete within the specified SpecTimeout the currently running node will have its context cancelled. The node will then have a period of time controlled by that node's GracePeriod decorator (or global --grace-period command-line argument) to exit. If the node does not exit within GracePeriod Ginkgo will leak the node and proceed to any clean-up nodes associated with the current spec.
+*/
+type SpecTimeout = internal.SpecTimeout
+
+/*
+GracePeriod denotes the period of time Ginkgo will wait for an interruptible node to exit once an interruption (whether due to a timeout or a user-invoked signal) has occurred. If both the global --grace-period cli flag and a GracePeriod decorator are specified the value in the decorator will take precedence.
+
+Nodes that do not finish within a GracePeriod will be leaked and Ginkgo will proceed to run subsequent nodes. In the event of a timeout, such leaks will be reported to the user.
+*/
+type GracePeriod = internal.GracePeriod
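+
+// A minimal illustrative sketch combining the timeout decorators on an
+// interruptible spec (the url variable is hypothetical):
+//
+//    var _ = It("responds before the deadline", func(ctx SpecContext) {
+//        req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
+//        Expect(err).NotTo(HaveOccurred())
+//        _, err = http.DefaultClient.Do(req)
+//        Expect(err).NotTo(HaveOccurred())
+//    }, SpecTimeout(30*time.Second), GracePeriod(5*time.Second))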
+
+/*
+SuppressProgressReporting is a decorator that allows you to disable progress reporting of a particular node. This is useful if `ginkgo -v -progress` is generating too much noise, particularly
+if you have a `ReportAfterEach` node that runs for every skipped spec and generates lots of progress reports.
+*/
+const SuppressProgressReporting = internal.SuppressProgressReporting
diff --git a/vendor/github.com/onsi/ginkgo/v2/deprecated_dsl.go b/vendor/github.com/onsi/ginkgo/v2/deprecated_dsl.go
new file mode 100644
index 000000000..f912bbec6
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/deprecated_dsl.go
@@ -0,0 +1,135 @@
+package ginkgo
+
+import (
+ "time"
+
+ "github.com/onsi/ginkgo/v2/internal"
+ "github.com/onsi/ginkgo/v2/internal/global"
+ "github.com/onsi/ginkgo/v2/reporters"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+/*
+Deprecated: Done Channel for asynchronous testing
+
+The Done channel pattern is no longer supported in Ginkgo 2.0.
+See here for better patterns for asynchronous testing: https://onsi.github.io/ginkgo/#patterns-for-asynchronous-testing
+
+For a migration guide see: https://onsi.github.io/ginkgo/MIGRATING_TO_V2#removed-async-testing
+*/
+type Done = internal.Done
+
+/*
+Deprecated: Custom Ginkgo test reporters are deprecated in Ginkgo 2.0.
+
+Use Ginkgo's reporting nodes and the 2.0 reporting infrastructure instead. You can learn more here: https://onsi.github.io/ginkgo/#reporting-infrastructure
+For a migration guide see: https://onsi.github.io/ginkgo/MIGRATING_TO_V2#removed-custom-reporters
+*/
+type Reporter = reporters.DeprecatedReporter
+
+/*
+Deprecated: Custom Reporters have been removed in Ginkgo 2.0. RunSpecsWithDefaultAndCustomReporters will simply call RunSpecs()
+
+Use Ginkgo's reporting nodes and the 2.0 reporting infrastructure instead. You can learn more here: https://onsi.github.io/ginkgo/#reporting-infrastructure
+For a migration guide see: https://onsi.github.io/ginkgo/MIGRATING_TO_V2#removed-custom-reporters
+*/
+func RunSpecsWithDefaultAndCustomReporters(t GinkgoTestingT, description string, _ []Reporter) bool {
+ deprecationTracker.TrackDeprecation(types.Deprecations.CustomReporter())
+ return RunSpecs(t, description)
+}
+
+/*
+Deprecated: Custom Reporters have been removed in Ginkgo 2.0. RunSpecsWithCustomReporters will simply call RunSpecs()
+
+Use Ginkgo's reporting nodes and the 2.0 reporting infrastructure instead. You can learn more here: https://onsi.github.io/ginkgo/#reporting-infrastructure
+For a migration guide see: https://onsi.github.io/ginkgo/MIGRATING_TO_V2#removed-custom-reporters
+*/
+func RunSpecsWithCustomReporters(t GinkgoTestingT, description string, _ []Reporter) bool {
+ deprecationTracker.TrackDeprecation(types.Deprecations.CustomReporter())
+ return RunSpecs(t, description)
+}
+
+/*
+Deprecated: GinkgoTestDescription has been replaced with SpecReport.
+
+Use CurrentSpecReport() instead.
+You can learn more here: https://onsi.github.io/ginkgo/#getting-a-report-for-the-current-spec
+The SpecReport type is documented here: https://pkg.go.dev/github.com/onsi/ginkgo/v2/types#SpecReport
+*/
+type DeprecatedGinkgoTestDescription struct {
+ FullTestText string
+ ComponentTexts []string
+ TestText string
+
+ FileName string
+ LineNumber int
+
+ Failed bool
+ Duration time.Duration
+}
+type GinkgoTestDescription = DeprecatedGinkgoTestDescription
+
+/*
+Deprecated: CurrentGinkgoTestDescription has been replaced with CurrentSpecReport.
+
+Use CurrentSpecReport() instead.
+You can learn more here: https://onsi.github.io/ginkgo/#getting-a-report-for-the-current-spec
+The SpecReport type is documented here: https://pkg.go.dev/github.com/onsi/ginkgo/v2/types#SpecReport
+*/
+func CurrentGinkgoTestDescription() DeprecatedGinkgoTestDescription {
+ deprecationTracker.TrackDeprecation(
+ types.Deprecations.CurrentGinkgoTestDescription(),
+ types.NewCodeLocation(1),
+ )
+ report := global.Suite.CurrentSpecReport()
+ if report.State == types.SpecStateInvalid {
+ return GinkgoTestDescription{}
+ }
+ componentTexts := []string{}
+ componentTexts = append(componentTexts, report.ContainerHierarchyTexts...)
+ componentTexts = append(componentTexts, report.LeafNodeText)
+
+ return DeprecatedGinkgoTestDescription{
+ ComponentTexts: componentTexts,
+ FullTestText: report.FullText(),
+ TestText: report.LeafNodeText,
+ FileName: report.LeafNodeLocation.FileName,
+ LineNumber: report.LeafNodeLocation.LineNumber,
+ Failed: report.State.Is(types.SpecStateFailureStates),
+ Duration: report.RunTime,
+ }
+}
+
+/*
+Deprecated: GinkgoParallelNode() has been renamed to GinkgoParallelProcess()
+*/
+func GinkgoParallelNode() int {
+ deprecationTracker.TrackDeprecation(
+ types.Deprecations.ParallelNode(),
+ types.NewCodeLocation(1),
+ )
+ return GinkgoParallelProcess()
+}
+
+/*
+Deprecated: Benchmarker has been removed from Ginkgo 2.0
+
+Use Gomega's gmeasure package instead.
+You can learn more here: https://onsi.github.io/ginkgo/#benchmarking-code
+*/
+type Benchmarker interface {
+ Time(name string, body func(), info ...interface{}) (elapsedTime time.Duration)
+ RecordValue(name string, value float64, info ...interface{})
+ RecordValueWithPrecision(name string, value float64, units string, precision int, info ...interface{})
+}
+
+/*
+Deprecated: Measure() has been removed from Ginkgo 2.0
+
+Use Gomega's gmeasure package instead.
+You can learn more here: https://onsi.github.io/ginkgo/#benchmarking-code
+*/
+func Measure(_ ...interface{}) bool {
+ deprecationTracker.TrackDeprecation(types.Deprecations.Measure(), types.NewCodeLocation(1))
+ return true
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_others.go b/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_others.go
new file mode 100644
index 000000000..778bfd7c7
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_others.go
@@ -0,0 +1,41 @@
+//go:build !windows
+// +build !windows
+
+/*
+These packages are used for colorize on Windows and contributed by mattn.jp@gmail.com
+
+ * go-colorable: https://github.com/mattn/go-colorable
+ * go-isatty: https://github.com/mattn/go-isatty
+
+The MIT License (MIT)
+
+Copyright (c) 2016 Yasuhiro Matsumoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+*/
+
+package formatter
+
+import (
+ "io"
+ "os"
+)
+
+func newColorable(file *os.File) io.Writer {
+ return file
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_windows.go b/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_windows.go
new file mode 100644
index 000000000..dd1d143cc
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_windows.go
@@ -0,0 +1,809 @@
+/*
+These packages are used for colorize on Windows and contributed by mattn.jp@gmail.com
+
+ * go-colorable: https://github.com/mattn/go-colorable
+ * go-isatty: https://github.com/mattn/go-isatty
+
+The MIT License (MIT)
+
+Copyright (c) 2016 Yasuhiro Matsumoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+*/
+
+package formatter
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "math"
+ "os"
+ "strconv"
+ "strings"
+ "syscall"
+ "unsafe"
+)
+
+var (
+ kernel32 = syscall.NewLazyDLL("kernel32.dll")
+ procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
+ procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute")
+ procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition")
+ procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW")
+ procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute")
+ procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
+)
+
+func isTerminal(fd uintptr) bool {
+ var st uint32
+ r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0)
+ return r != 0 && e == 0
+}
+
+const (
+ foregroundBlue = 0x1
+ foregroundGreen = 0x2
+ foregroundRed = 0x4
+ foregroundIntensity = 0x8
+ foregroundMask = (foregroundRed | foregroundBlue | foregroundGreen | foregroundIntensity)
+ backgroundBlue = 0x10
+ backgroundGreen = 0x20
+ backgroundRed = 0x40
+ backgroundIntensity = 0x80
+ backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity)
+)
+
+type wchar uint16
+type short int16
+type dword uint32
+type word uint16
+
+type coord struct {
+ x short
+ y short
+}
+
+type smallRect struct {
+ left short
+ top short
+ right short
+ bottom short
+}
+
+type consoleScreenBufferInfo struct {
+ size coord
+ cursorPosition coord
+ attributes word
+ window smallRect
+ maximumWindowSize coord
+}
+
+type writer struct {
+ out io.Writer
+ handle syscall.Handle
+ lastbuf bytes.Buffer
+ oldattr word
+}
+
+func newColorable(file *os.File) io.Writer {
+ if file == nil {
+ panic("nil passed instead of *os.File to NewColorable()")
+ }
+
+ if isTerminal(file.Fd()) {
+ var csbi consoleScreenBufferInfo
+ handle := syscall.Handle(file.Fd())
+ procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi)))
+ return &writer{out: file, handle: handle, oldattr: csbi.attributes}
+ } else {
+ return file
+ }
+}
+
+var color256 = map[int]int{
+ 0: 0x000000,
+ 1: 0x800000,
+ 2: 0x008000,
+ 3: 0x808000,
+ 4: 0x000080,
+ 5: 0x800080,
+ 6: 0x008080,
+ 7: 0xc0c0c0,
+ 8: 0x808080,
+ 9: 0xff0000,
+ 10: 0x00ff00,
+ 11: 0xffff00,
+ 12: 0x0000ff,
+ 13: 0xff00ff,
+ 14: 0x00ffff,
+ 15: 0xffffff,
+ 16: 0x000000,
+ 17: 0x00005f,
+ 18: 0x000087,
+ 19: 0x0000af,
+ 20: 0x0000d7,
+ 21: 0x0000ff,
+ 22: 0x005f00,
+ 23: 0x005f5f,
+ 24: 0x005f87,
+ 25: 0x005faf,
+ 26: 0x005fd7,
+ 27: 0x005fff,
+ 28: 0x008700,
+ 29: 0x00875f,
+ 30: 0x008787,
+ 31: 0x0087af,
+ 32: 0x0087d7,
+ 33: 0x0087ff,
+ 34: 0x00af00,
+ 35: 0x00af5f,
+ 36: 0x00af87,
+ 37: 0x00afaf,
+ 38: 0x00afd7,
+ 39: 0x00afff,
+ 40: 0x00d700,
+ 41: 0x00d75f,
+ 42: 0x00d787,
+ 43: 0x00d7af,
+ 44: 0x00d7d7,
+ 45: 0x00d7ff,
+ 46: 0x00ff00,
+ 47: 0x00ff5f,
+ 48: 0x00ff87,
+ 49: 0x00ffaf,
+ 50: 0x00ffd7,
+ 51: 0x00ffff,
+ 52: 0x5f0000,
+ 53: 0x5f005f,
+ 54: 0x5f0087,
+ 55: 0x5f00af,
+ 56: 0x5f00d7,
+ 57: 0x5f00ff,
+ 58: 0x5f5f00,
+ 59: 0x5f5f5f,
+ 60: 0x5f5f87,
+ 61: 0x5f5faf,
+ 62: 0x5f5fd7,
+ 63: 0x5f5fff,
+ 64: 0x5f8700,
+ 65: 0x5f875f,
+ 66: 0x5f8787,
+ 67: 0x5f87af,
+ 68: 0x5f87d7,
+ 69: 0x5f87ff,
+ 70: 0x5faf00,
+ 71: 0x5faf5f,
+ 72: 0x5faf87,
+ 73: 0x5fafaf,
+ 74: 0x5fafd7,
+ 75: 0x5fafff,
+ 76: 0x5fd700,
+ 77: 0x5fd75f,
+ 78: 0x5fd787,
+ 79: 0x5fd7af,
+ 80: 0x5fd7d7,
+ 81: 0x5fd7ff,
+ 82: 0x5fff00,
+ 83: 0x5fff5f,
+ 84: 0x5fff87,
+ 85: 0x5fffaf,
+ 86: 0x5fffd7,
+ 87: 0x5fffff,
+ 88: 0x870000,
+ 89: 0x87005f,
+ 90: 0x870087,
+ 91: 0x8700af,
+ 92: 0x8700d7,
+ 93: 0x8700ff,
+ 94: 0x875f00,
+ 95: 0x875f5f,
+ 96: 0x875f87,
+ 97: 0x875faf,
+ 98: 0x875fd7,
+ 99: 0x875fff,
+ 100: 0x878700,
+ 101: 0x87875f,
+ 102: 0x878787,
+ 103: 0x8787af,
+ 104: 0x8787d7,
+ 105: 0x8787ff,
+ 106: 0x87af00,
+ 107: 0x87af5f,
+ 108: 0x87af87,
+ 109: 0x87afaf,
+ 110: 0x87afd7,
+ 111: 0x87afff,
+ 112: 0x87d700,
+ 113: 0x87d75f,
+ 114: 0x87d787,
+ 115: 0x87d7af,
+ 116: 0x87d7d7,
+ 117: 0x87d7ff,
+ 118: 0x87ff00,
+ 119: 0x87ff5f,
+ 120: 0x87ff87,
+ 121: 0x87ffaf,
+ 122: 0x87ffd7,
+ 123: 0x87ffff,
+ 124: 0xaf0000,
+ 125: 0xaf005f,
+ 126: 0xaf0087,
+ 127: 0xaf00af,
+ 128: 0xaf00d7,
+ 129: 0xaf00ff,
+ 130: 0xaf5f00,
+ 131: 0xaf5f5f,
+ 132: 0xaf5f87,
+ 133: 0xaf5faf,
+ 134: 0xaf5fd7,
+ 135: 0xaf5fff,
+ 136: 0xaf8700,
+ 137: 0xaf875f,
+ 138: 0xaf8787,
+ 139: 0xaf87af,
+ 140: 0xaf87d7,
+ 141: 0xaf87ff,
+ 142: 0xafaf00,
+ 143: 0xafaf5f,
+ 144: 0xafaf87,
+ 145: 0xafafaf,
+ 146: 0xafafd7,
+ 147: 0xafafff,
+ 148: 0xafd700,
+ 149: 0xafd75f,
+ 150: 0xafd787,
+ 151: 0xafd7af,
+ 152: 0xafd7d7,
+ 153: 0xafd7ff,
+ 154: 0xafff00,
+ 155: 0xafff5f,
+ 156: 0xafff87,
+ 157: 0xafffaf,
+ 158: 0xafffd7,
+ 159: 0xafffff,
+ 160: 0xd70000,
+ 161: 0xd7005f,
+ 162: 0xd70087,
+ 163: 0xd700af,
+ 164: 0xd700d7,
+ 165: 0xd700ff,
+ 166: 0xd75f00,
+ 167: 0xd75f5f,
+ 168: 0xd75f87,
+ 169: 0xd75faf,
+ 170: 0xd75fd7,
+ 171: 0xd75fff,
+ 172: 0xd78700,
+ 173: 0xd7875f,
+ 174: 0xd78787,
+ 175: 0xd787af,
+ 176: 0xd787d7,
+ 177: 0xd787ff,
+ 178: 0xd7af00,
+ 179: 0xd7af5f,
+ 180: 0xd7af87,
+ 181: 0xd7afaf,
+ 182: 0xd7afd7,
+ 183: 0xd7afff,
+ 184: 0xd7d700,
+ 185: 0xd7d75f,
+ 186: 0xd7d787,
+ 187: 0xd7d7af,
+ 188: 0xd7d7d7,
+ 189: 0xd7d7ff,
+ 190: 0xd7ff00,
+ 191: 0xd7ff5f,
+ 192: 0xd7ff87,
+ 193: 0xd7ffaf,
+ 194: 0xd7ffd7,
+ 195: 0xd7ffff,
+ 196: 0xff0000,
+ 197: 0xff005f,
+ 198: 0xff0087,
+ 199: 0xff00af,
+ 200: 0xff00d7,
+ 201: 0xff00ff,
+ 202: 0xff5f00,
+ 203: 0xff5f5f,
+ 204: 0xff5f87,
+ 205: 0xff5faf,
+ 206: 0xff5fd7,
+ 207: 0xff5fff,
+ 208: 0xff8700,
+ 209: 0xff875f,
+ 210: 0xff8787,
+ 211: 0xff87af,
+ 212: 0xff87d7,
+ 213: 0xff87ff,
+ 214: 0xffaf00,
+ 215: 0xffaf5f,
+ 216: 0xffaf87,
+ 217: 0xffafaf,
+ 218: 0xffafd7,
+ 219: 0xffafff,
+ 220: 0xffd700,
+ 221: 0xffd75f,
+ 222: 0xffd787,
+ 223: 0xffd7af,
+ 224: 0xffd7d7,
+ 225: 0xffd7ff,
+ 226: 0xffff00,
+ 227: 0xffff5f,
+ 228: 0xffff87,
+ 229: 0xffffaf,
+ 230: 0xffffd7,
+ 231: 0xffffff,
+ 232: 0x080808,
+ 233: 0x121212,
+ 234: 0x1c1c1c,
+ 235: 0x262626,
+ 236: 0x303030,
+ 237: 0x3a3a3a,
+ 238: 0x444444,
+ 239: 0x4e4e4e,
+ 240: 0x585858,
+ 241: 0x626262,
+ 242: 0x6c6c6c,
+ 243: 0x767676,
+ 244: 0x808080,
+ 245: 0x8a8a8a,
+ 246: 0x949494,
+ 247: 0x9e9e9e,
+ 248: 0xa8a8a8,
+ 249: 0xb2b2b2,
+ 250: 0xbcbcbc,
+ 251: 0xc6c6c6,
+ 252: 0xd0d0d0,
+ 253: 0xdadada,
+ 254: 0xe4e4e4,
+ 255: 0xeeeeee,
+}
+
+func (w *writer) Write(data []byte) (n int, err error) {
+ var csbi consoleScreenBufferInfo
+ procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+
+ er := bytes.NewBuffer(data)
+loop:
+ for {
+ r1, _, err := procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+ if r1 == 0 {
+ break loop
+ }
+
+ c1, _, err := er.ReadRune()
+ if err != nil {
+ break loop
+ }
+ if c1 != 0x1b {
+ fmt.Fprint(w.out, string(c1))
+ continue
+ }
+ c2, _, err := er.ReadRune()
+ if err != nil {
+ w.lastbuf.WriteRune(c1)
+ break loop
+ }
+ if c2 != 0x5b {
+ w.lastbuf.WriteRune(c1)
+ w.lastbuf.WriteRune(c2)
+ continue
+ }
+
+ var buf bytes.Buffer
+ var m rune
+ for {
+ c, _, err := er.ReadRune()
+ if err != nil {
+ w.lastbuf.WriteRune(c1)
+ w.lastbuf.WriteRune(c2)
+ w.lastbuf.Write(buf.Bytes())
+ break loop
+ }
+ if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' {
+ m = c
+ break
+ }
+ buf.Write([]byte(string(c)))
+ }
+
+ var csbi consoleScreenBufferInfo
+ switch m {
+ case 'A':
+ n, err = strconv.Atoi(buf.String())
+ if err != nil {
+ continue
+ }
+ procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+ csbi.cursorPosition.y -= short(n)
+ procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+ case 'B':
+ n, err = strconv.Atoi(buf.String())
+ if err != nil {
+ continue
+ }
+ procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+ csbi.cursorPosition.y += short(n)
+ procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+ case 'C':
+ n, err = strconv.Atoi(buf.String())
+ if err != nil {
+ continue
+ }
+ procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+ csbi.cursorPosition.x -= short(n)
+ procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+ case 'D':
+ n, err = strconv.Atoi(buf.String())
+ if err != nil {
+ continue
+ }
+ procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+ csbi.cursorPosition.x += short(n)
+ procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+ case 'E':
+ n, err = strconv.Atoi(buf.String())
+ if err != nil {
+ continue
+ }
+ procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+ csbi.cursorPosition.x = 0
+ csbi.cursorPosition.y += short(n)
+ procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+ case 'F':
+ n, err = strconv.Atoi(buf.String())
+ if err != nil {
+ continue
+ }
+ procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+ csbi.cursorPosition.x = 0
+ csbi.cursorPosition.y -= short(n)
+ procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+ case 'G':
+ n, err = strconv.Atoi(buf.String())
+ if err != nil {
+ continue
+ }
+ procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+ csbi.cursorPosition.x = short(n)
+ procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+ case 'H':
+ token := strings.Split(buf.String(), ";")
+ if len(token) != 2 {
+ continue
+ }
+ n1, err := strconv.Atoi(token[0])
+ if err != nil {
+ continue
+ }
+ n2, err := strconv.Atoi(token[1])
+ if err != nil {
+ continue
+ }
+ csbi.cursorPosition.x = short(n2)
+ csbi.cursorPosition.y = short(n1)
+ procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+ case 'J':
+ n, err := strconv.Atoi(buf.String())
+ if err != nil {
+ continue
+ }
+ var cursor coord
+ switch n {
+ case 0:
+ cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y}
+ case 1:
+ cursor = coord{x: csbi.window.left, y: csbi.window.top}
+ case 2:
+ cursor = coord{x: csbi.window.left, y: csbi.window.top}
+ }
+ var count, written dword
+ count = dword(csbi.size.x - csbi.cursorPosition.x + (csbi.size.y-csbi.cursorPosition.y)*csbi.size.x)
+ procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
+ procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
+ case 'K':
+ n, err := strconv.Atoi(buf.String())
+ if err != nil {
+ continue
+ }
+ var cursor coord
+ switch n {
+ case 0:
+ cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y}
+ case 1:
+ cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y}
+ case 2:
+ cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y}
+ }
+ var count, written dword
+ count = dword(csbi.size.x - csbi.cursorPosition.x)
+ procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
+ procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
+ case 'm':
+ attr := csbi.attributes
+ cs := buf.String()
+ if cs == "" {
+ procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(w.oldattr))
+ continue
+ }
+ token := strings.Split(cs, ";")
+ for i := 0; i < len(token); i += 1 {
+ ns := token[i]
+ if n, err = strconv.Atoi(ns); err == nil {
+ switch {
+ case n == 0 || n == 100:
+ attr = w.oldattr
+ case 1 <= n && n <= 5:
+ attr |= foregroundIntensity
+ case n == 7:
+ attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4)
+ case n == 22 || n == 25:
+ attr |= foregroundIntensity
+ case n == 27:
+ attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4)
+ case 30 <= n && n <= 37:
+ attr = (attr & backgroundMask)
+ if (n-30)&1 != 0 {
+ attr |= foregroundRed
+ }
+ if (n-30)&2 != 0 {
+ attr |= foregroundGreen
+ }
+ if (n-30)&4 != 0 {
+ attr |= foregroundBlue
+ }
+ case n == 38: // set foreground color.
+ if i < len(token)-2 && (token[i+1] == "5" || token[i+1] == "05") {
+ if n256, err := strconv.Atoi(token[i+2]); err == nil {
+ if n256foreAttr == nil {
+ n256setup()
+ }
+ attr &= backgroundMask
+ attr |= n256foreAttr[n256]
+ i += 2
+ }
+ } else {
+ attr = attr & (w.oldattr & backgroundMask)
+ }
+ case n == 39: // reset foreground color.
+ attr &= backgroundMask
+ attr |= w.oldattr & foregroundMask
+ case 40 <= n && n <= 47:
+ attr = (attr & foregroundMask)
+ if (n-40)&1 != 0 {
+ attr |= backgroundRed
+ }
+ if (n-40)&2 != 0 {
+ attr |= backgroundGreen
+ }
+ if (n-40)&4 != 0 {
+ attr |= backgroundBlue
+ }
+ case n == 48: // set background color.
+ if i < len(token)-2 && token[i+1] == "5" {
+ if n256, err := strconv.Atoi(token[i+2]); err == nil {
+ if n256backAttr == nil {
+ n256setup()
+ }
+ attr &= foregroundMask
+ attr |= n256backAttr[n256]
+ i += 2
+ }
+ } else {
+ attr = attr & (w.oldattr & foregroundMask)
+ }
+ case n == 49: // reset background color.
+ attr &= foregroundMask
+ attr |= w.oldattr & backgroundMask
+ case 90 <= n && n <= 97:
+ attr = (attr & backgroundMask)
+ attr |= foregroundIntensity
+ if (n-90)&1 != 0 {
+ attr |= foregroundRed
+ }
+ if (n-90)&2 != 0 {
+ attr |= foregroundGreen
+ }
+ if (n-90)&4 != 0 {
+ attr |= foregroundBlue
+ }
+ case 100 <= n && n <= 107:
+ attr = (attr & foregroundMask)
+ attr |= backgroundIntensity
+ if (n-100)&1 != 0 {
+ attr |= backgroundRed
+ }
+ if (n-100)&2 != 0 {
+ attr |= backgroundGreen
+ }
+ if (n-100)&4 != 0 {
+ attr |= backgroundBlue
+ }
+ }
+ procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(attr))
+ }
+ }
+ }
+ }
+ return len(data) - w.lastbuf.Len(), nil
+}
+
+type consoleColor struct {
+ rgb int
+ red bool
+ green bool
+ blue bool
+ intensity bool
+}
+
+func (c consoleColor) foregroundAttr() (attr word) {
+ if c.red {
+ attr |= foregroundRed
+ }
+ if c.green {
+ attr |= foregroundGreen
+ }
+ if c.blue {
+ attr |= foregroundBlue
+ }
+ if c.intensity {
+ attr |= foregroundIntensity
+ }
+ return
+}
+
+func (c consoleColor) backgroundAttr() (attr word) {
+ if c.red {
+ attr |= backgroundRed
+ }
+ if c.green {
+ attr |= backgroundGreen
+ }
+ if c.blue {
+ attr |= backgroundBlue
+ }
+ if c.intensity {
+ attr |= backgroundIntensity
+ }
+ return
+}
+
+var color16 = []consoleColor{
+ consoleColor{0x000000, false, false, false, false},
+ consoleColor{0x000080, false, false, true, false},
+ consoleColor{0x008000, false, true, false, false},
+ consoleColor{0x008080, false, true, true, false},
+ consoleColor{0x800000, true, false, false, false},
+ consoleColor{0x800080, true, false, true, false},
+ consoleColor{0x808000, true, true, false, false},
+ consoleColor{0xc0c0c0, true, true, true, false},
+ consoleColor{0x808080, false, false, false, true},
+ consoleColor{0x0000ff, false, false, true, true},
+ consoleColor{0x00ff00, false, true, false, true},
+ consoleColor{0x00ffff, false, true, true, true},
+ consoleColor{0xff0000, true, false, false, true},
+ consoleColor{0xff00ff, true, false, true, true},
+ consoleColor{0xffff00, true, true, false, true},
+ consoleColor{0xffffff, true, true, true, true},
+}
+
+type hsv struct {
+ h, s, v float32
+}
+
+func (a hsv) dist(b hsv) float32 {
+ dh := a.h - b.h
+ switch {
+ case dh > 0.5:
+ dh = 1 - dh
+ case dh < -0.5:
+ dh = -1 - dh
+ }
+ ds := a.s - b.s
+ dv := a.v - b.v
+ return float32(math.Sqrt(float64(dh*dh + ds*ds + dv*dv)))
+}
+
+func toHSV(rgb int) hsv {
+ r, g, b := float32((rgb&0xFF0000)>>16)/256.0,
+ float32((rgb&0x00FF00)>>8)/256.0,
+ float32(rgb&0x0000FF)/256.0
+ min, max := minmax3f(r, g, b)
+ h := max - min
+ if h > 0 {
+ if max == r {
+ h = (g - b) / h
+ if h < 0 {
+ h += 6
+ }
+ } else if max == g {
+ h = 2 + (b-r)/h
+ } else {
+ h = 4 + (r-g)/h
+ }
+ }
+ h /= 6.0
+ s := max - min
+ if max != 0 {
+ s /= max
+ }
+ v := max
+ return hsv{h: h, s: s, v: v}
+}
+
+type hsvTable []hsv
+
+func toHSVTable(rgbTable []consoleColor) hsvTable {
+ t := make(hsvTable, len(rgbTable))
+ for i, c := range rgbTable {
+ t[i] = toHSV(c.rgb)
+ }
+ return t
+}
+
+func (t hsvTable) find(rgb int) consoleColor {
+ hsv := toHSV(rgb)
+ n := 7
+ l := float32(5.0)
+ for i, p := range t {
+ d := hsv.dist(p)
+ if d < l {
+ l, n = d, i
+ }
+ }
+ return color16[n]
+}
+
+func minmax3f(a, b, c float32) (min, max float32) {
+ if a < b {
+ if b < c {
+ return a, c
+ } else if a < c {
+ return a, b
+ } else {
+ return c, b
+ }
+ } else {
+ if a < c {
+ return b, c
+ } else if b < c {
+ return b, a
+ } else {
+ return c, a
+ }
+ }
+}
+
+var n256foreAttr []word
+var n256backAttr []word
+
+func n256setup() {
+ n256foreAttr = make([]word, 256)
+ n256backAttr = make([]word, 256)
+ t := toHSVTable(color16)
+ for i, rgb := range color256 {
+ c := t.find(rgb)
+ n256foreAttr[i] = c.foregroundAttr()
+ n256backAttr[i] = c.backgroundAttr()
+ }
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go b/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go
new file mode 100644
index 000000000..4d5749114
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go
@@ -0,0 +1,234 @@
+package formatter
+
+import (
+ "fmt"
+ "os"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+// ColorableStdOut and ColorableStdErr enable color output support on Windows
+var ColorableStdOut = newColorable(os.Stdout)
+var ColorableStdErr = newColorable(os.Stderr)
+
+const COLS = 80
+
+type ColorMode uint8
+
+const (
+ ColorModeNone ColorMode = iota
+ ColorModeTerminal
+ ColorModePassthrough
+)
+
+var SingletonFormatter = New(ColorModeTerminal)
+
+func F(format string, args ...interface{}) string {
+ return SingletonFormatter.F(format, args...)
+}
+
+func Fi(indentation uint, format string, args ...interface{}) string {
+ return SingletonFormatter.Fi(indentation, format, args...)
+}
+
+func Fiw(indentation uint, maxWidth uint, format string, args ...interface{}) string {
+ return SingletonFormatter.Fiw(indentation, maxWidth, format, args...)
+}
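+
+// A minimal illustrative sketch of the style-tag syntax these helpers accept
+// (tags resolve against the color table built in New below):
+//
+//    formatter.F("{{green}}{{bold}}PASS{{/}} %d specs", 12)
+//    formatter.Fi(1, "indented one level")
+//    formatter.Fiw(1, formatter.COLS, "indented and word-wrapped to fit 80 columns")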
+
+type Formatter struct {
+ ColorMode ColorMode
+ colors map[string]string
+ styleRe *regexp.Regexp
+ preserveColorStylingTags bool
+}
+
+func NewWithNoColorBool(noColor bool) Formatter {
+ if noColor {
+ return New(ColorModeNone)
+ }
+ return New(ColorModeTerminal)
+}
+
+func New(colorMode ColorMode) Formatter {
+ colorAliases := map[string]int{
+ "black": 0,
+ "red": 1,
+ "green": 2,
+ "yellow": 3,
+ "blue": 4,
+ "magenta": 5,
+ "cyan": 6,
+ "white": 7,
+ }
+ for colorAlias, n := range colorAliases {
+ colorAliases[fmt.Sprintf("bright-%s", colorAlias)] = n + 8
+ }
+
+ getColor := func(color, defaultEscapeCode string) string {
+ color = strings.ToUpper(strings.ReplaceAll(color, "-", "_"))
+ envVar := fmt.Sprintf("GINKGO_CLI_COLOR_%s", color)
+ envVarColor := os.Getenv(envVar)
+ if envVarColor == "" {
+ return defaultEscapeCode
+ }
+ if colorCode, ok := colorAliases[envVarColor]; ok {
+ return fmt.Sprintf("\x1b[38;5;%dm", colorCode)
+ }
+ colorCode, err := strconv.Atoi(envVarColor)
+ if err != nil || colorCode < 0 || colorCode > 255 {
+ return defaultEscapeCode
+ }
+ return fmt.Sprintf("\x1b[38;5;%dm", colorCode)
+ }
+
+ if _, noColor := os.LookupEnv("GINKGO_NO_COLOR"); noColor {
+ colorMode = ColorModeNone
+ }
+
+ f := Formatter{
+ ColorMode: colorMode,
+ colors: map[string]string{
+ "/": "\x1b[0m",
+ "bold": "\x1b[1m",
+ "underline": "\x1b[4m",
+
+ "red": getColor("red", "\x1b[38;5;9m"),
+ "orange": getColor("orange", "\x1b[38;5;214m"),
+ "coral": getColor("coral", "\x1b[38;5;204m"),
+ "magenta": getColor("magenta", "\x1b[38;5;13m"),
+ "green": getColor("green", "\x1b[38;5;10m"),
+ "dark-green": getColor("dark-green", "\x1b[38;5;28m"),
+ "yellow": getColor("yellow", "\x1b[38;5;11m"),
+ "light-yellow": getColor("light-yellow", "\x1b[38;5;228m"),
+ "cyan": getColor("cyan", "\x1b[38;5;14m"),
+ "gray": getColor("gray", "\x1b[38;5;243m"),
+ "light-gray": getColor("light-gray", "\x1b[38;5;246m"),
+ "blue": getColor("blue", "\x1b[38;5;12m"),
+ },
+ }
+ colors := []string{}
+ for color := range f.colors {
+ colors = append(colors, color)
+ }
+ f.styleRe = regexp.MustCompile("{{(" + strings.Join(colors, "|") + ")}}")
+ return f
+}
+
+func (f Formatter) F(format string, args ...interface{}) string {
+ return f.Fi(0, format, args...)
+}
+
+func (f Formatter) Fi(indentation uint, format string, args ...interface{}) string {
+ return f.Fiw(indentation, 0, format, args...)
+}
+
+func (f Formatter) Fiw(indentation uint, maxWidth uint, format string, args ...interface{}) string {
+ out := f.style(format)
+ if len(args) > 0 {
+ out = fmt.Sprintf(out, args...)
+ }
+
+ if indentation == 0 && maxWidth == 0 {
+ return out
+ }
+
+ lines := strings.Split(out, "\n")
+
+ if maxWidth != 0 {
+ outLines := []string{}
+
+ maxWidth = maxWidth - indentation*2
+ for _, line := range lines {
+ if f.length(line) <= maxWidth {
+ outLines = append(outLines, line)
+ continue
+ }
+ words := strings.Split(line, " ")
+ outWords := []string{words[0]}
+ length := uint(f.length(words[0]))
+ for _, word := range words[1:] {
+ wordLength := f.length(word)
+ if length+wordLength+1 <= maxWidth {
+ length += wordLength + 1
+ outWords = append(outWords, word)
+ continue
+ }
+ outLines = append(outLines, strings.Join(outWords, " "))
+ outWords = []string{word}
+ length = wordLength
+ }
+ if len(outWords) > 0 {
+ outLines = append(outLines, strings.Join(outWords, " "))
+ }
+ }
+
+ lines = outLines
+ }
+
+ if indentation == 0 {
+ return strings.Join(lines, "\n")
+ }
+
+ padding := strings.Repeat(" ", int(indentation))
+ for i := range lines {
+ if lines[i] != "" {
+ lines[i] = padding + lines[i]
+ }
+ }
+
+ return strings.Join(lines, "\n")
+}
+
+func (f Formatter) length(styled string) uint {
+ n := uint(0)
+ inStyle := false
+ for _, b := range styled {
+ if inStyle {
+ if b == 'm' {
+ inStyle = false
+ }
+ continue
+ }
+ if b == '\x1b' {
+ inStyle = true
+ continue
+ }
+ n += 1
+ }
+ return n
+}
+
+func (f Formatter) CycleJoin(elements []string, joiner string, cycle []string) string {
+ if len(elements) == 0 {
+ return ""
+ }
+ n := len(cycle)
+ out := ""
+ for i, text := range elements {
+ out += cycle[i%n] + text
+ if i < len(elements)-1 {
+ out += joiner
+ }
+ }
+ out += "{{/}}"
+ return f.style(out)
+}
+
+func (f Formatter) style(s string) string {
+ switch f.ColorMode {
+ case ColorModeNone:
+ return f.styleRe.ReplaceAllString(s, "")
+ case ColorModePassthrough:
+ return s
+ case ColorModeTerminal:
+ return f.styleRe.ReplaceAllStringFunc(s, func(match string) string {
+ if out, ok := f.colors[strings.Trim(match, "{}")]; ok {
+ return out
+ }
+ return match
+ })
+ }
+
+ return ""
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go
new file mode 100644
index 000000000..fd1726084
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go
@@ -0,0 +1,76 @@
+package build
+
+import (
+ "fmt"
+ "os"
+ "path"
+
+ "github.com/onsi/ginkgo/v2/ginkgo/command"
+ "github.com/onsi/ginkgo/v2/ginkgo/internal"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+func BuildBuildCommand() command.Command {
+ var cliConfig = types.NewDefaultCLIConfig()
+ var goFlagsConfig = types.NewDefaultGoFlagsConfig()
+
+ flags, err := types.BuildBuildCommandFlagSet(&cliConfig, &goFlagsConfig)
+ if err != nil {
+ panic(err)
+ }
+
+ return command.Command{
+ Name: "build",
+ Flags: flags,
+ Usage: "ginkgo build <FLAGS> <PACKAGES>",
+ ShortDoc: "Build the passed in <PACKAGES> (or the package in the current directory if left blank).",
+ DocLink: "precompiling-suites",
+ Command: func(args []string, _ []string) {
+ var errors []error
+ cliConfig, goFlagsConfig, errors = types.VetAndInitializeCLIAndGoConfig(cliConfig, goFlagsConfig)
+ command.AbortIfErrors("Ginkgo detected configuration issues:", errors)
+
+ buildSpecs(args, cliConfig, goFlagsConfig)
+ },
+ }
+}
+
+func buildSpecs(args []string, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig) {
+ suites := internal.FindSuites(args, cliConfig, false).WithoutState(internal.TestSuiteStateSkippedByFilter)
+ if len(suites) == 0 {
+ command.AbortWith("Found no test suites")
+ }
+
+ internal.VerifyCLIAndFrameworkVersion(suites)
+
+ opc := internal.NewOrderedParallelCompiler(cliConfig.ComputedNumCompilers())
+ opc.StartCompiling(suites, goFlagsConfig)
+
+ for {
+ suiteIdx, suite := opc.Next()
+ if suiteIdx >= len(suites) {
+ break
+ }
+ suites[suiteIdx] = suite
+ if suite.State.Is(internal.TestSuiteStateFailedToCompile) {
+ fmt.Println(suite.CompilationError.Error())
+ } else {
+ if len(goFlagsConfig.O) == 0 {
+ goFlagsConfig.O = path.Join(suite.Path, suite.PackageName+".test")
+ } else {
+ stat, err := os.Stat(goFlagsConfig.O)
+ if err != nil {
+ panic(err)
+ }
+ if stat.IsDir() {
+ goFlagsConfig.O += "/" + suite.PackageName + ".test"
+ }
+ }
+ fmt.Printf("Compiled %s\n", goFlagsConfig.O)
+ }
+ }
+
+ if suites.CountWithState(internal.TestSuiteStateFailedToCompile) > 0 {
+ command.AbortWith("Failed to compile all tests")
+ }
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go
new file mode 100644
index 000000000..2efd28608
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go
@@ -0,0 +1,61 @@
+package command
+
+import "fmt"
+
+type AbortDetails struct {
+ ExitCode int
+ Error error
+ EmitUsage bool
+}
+
+func Abort(details AbortDetails) {
+ panic(details)
+}
+
+func AbortGracefullyWith(format string, args ...interface{}) {
+ Abort(AbortDetails{
+ ExitCode: 0,
+ Error: fmt.Errorf(format, args...),
+ EmitUsage: false,
+ })
+}
+
+func AbortWith(format string, args ...interface{}) {
+ Abort(AbortDetails{
+ ExitCode: 1,
+ Error: fmt.Errorf(format, args...),
+ EmitUsage: false,
+ })
+}
+
+func AbortWithUsage(format string, args ...interface{}) {
+ Abort(AbortDetails{
+ ExitCode: 1,
+ Error: fmt.Errorf(format, args...),
+ EmitUsage: true,
+ })
+}
+
+func AbortIfError(preamble string, err error) {
+ if err != nil {
+ Abort(AbortDetails{
+ ExitCode: 1,
+ Error: fmt.Errorf("%s\n%s", preamble, err.Error()),
+ EmitUsage: false,
+ })
+ }
+}
+
+func AbortIfErrors(preamble string, errors []error) {
+ if len(errors) > 0 {
+ out := ""
+ for _, err := range errors {
+ out += err.Error()
+ }
+ Abort(AbortDetails{
+ ExitCode: 1,
+ Error: fmt.Errorf("%s\n%s", preamble, out),
+ EmitUsage: false,
+ })
+ }
+}
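+
+// A minimal illustrative sketch: each helper panics with AbortDetails, which
+// Program.RunAndExit recovers to print the error and exit. A hypothetical
+// command body might do:
+//
+//    if len(args) == 0 {
+//        command.AbortWithUsage("please pass at least one package")
+//    }
+//    command.AbortIfError("could not load the suite:", err)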
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go
new file mode 100644
index 000000000..12e0e5659
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go
@@ -0,0 +1,50 @@
+package command
+
+import (
+ "fmt"
+ "io"
+ "strings"
+
+ "github.com/onsi/ginkgo/v2/formatter"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type Command struct {
+ Name string
+ Flags types.GinkgoFlagSet
+ Usage string
+ ShortDoc string
+ Documentation string
+ DocLink string
+ Command func(args []string, additionalArgs []string)
+}
+
+func (c Command) Run(args []string, additionalArgs []string) {
+ args, err := c.Flags.Parse(args)
+ if err != nil {
+ AbortWithUsage(err.Error())
+ }
+
+ c.Command(args, additionalArgs)
+}
+
+func (c Command) EmitUsage(writer io.Writer) {
+ fmt.Fprintln(writer, formatter.F("{{bold}}"+c.Usage+"{{/}}"))
+ fmt.Fprintln(writer, formatter.F("{{gray}}%s{{/}}", strings.Repeat("-", len(c.Usage))))
+ if c.ShortDoc != "" {
+ fmt.Fprintln(writer, formatter.Fiw(0, formatter.COLS, c.ShortDoc))
+ fmt.Fprintln(writer, "")
+ }
+ if c.Documentation != "" {
+ fmt.Fprintln(writer, formatter.Fiw(0, formatter.COLS, c.Documentation))
+ fmt.Fprintln(writer, "")
+ }
+ if c.DocLink != "" {
+ fmt.Fprintln(writer, formatter.Fi(0, "{{bold}}Learn more at:{{/}} {{cyan}}{{underline}}http://onsi.github.io/ginkgo/#%s{{/}}", c.DocLink))
+ fmt.Fprintln(writer, "")
+ }
+ flagUsage := c.Flags.Usage()
+ if flagUsage != "" {
+ fmt.Fprint(writer, formatter.F(flagUsage))
+ }
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go
new file mode 100644
index 000000000..88dd8d6b0
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go
@@ -0,0 +1,182 @@
+package command
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "strings"
+
+ "github.com/onsi/ginkgo/v2/formatter"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type Program struct {
+ Name string
+ Heading string
+ Commands []Command
+ DefaultCommand Command
+ DeprecatedCommands []DeprecatedCommand
+
+ //For testing - leave as nil in production
+ OutWriter io.Writer
+ ErrWriter io.Writer
+ Exiter func(code int)
+}
+
+type DeprecatedCommand struct {
+ Name string
+ Deprecation types.Deprecation
+}
+
+func (p Program) RunAndExit(osArgs []string) {
+ var command Command
+ deprecationTracker := types.NewDeprecationTracker()
+ if p.Exiter == nil {
+ p.Exiter = os.Exit
+ }
+ if p.OutWriter == nil {
+ p.OutWriter = formatter.ColorableStdOut
+ }
+ if p.ErrWriter == nil {
+ p.ErrWriter = formatter.ColorableStdErr
+ }
+
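+ // Commands report failure by panicking with AbortDetails (see abort.go);
+ // this deferred recover prints the error and usage and converts the panic
+ // into an exit code, re-panicking on anything else.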
+ defer func() {
+ exitCode := 0
+
+ if r := recover(); r != nil {
+ details, ok := r.(AbortDetails)
+ if !ok {
+ panic(r)
+ }
+
+ if details.Error != nil {
+ fmt.Fprintln(p.ErrWriter, formatter.F("{{red}}{{bold}}%s %s{{/}} {{red}}failed{{/}}", p.Name, command.Name))
+ fmt.Fprintln(p.ErrWriter, formatter.Fi(1, details.Error.Error()))
+ }
+ if details.EmitUsage {
+ if details.Error != nil {
+ fmt.Fprintln(p.ErrWriter, "")
+ }
+ command.EmitUsage(p.ErrWriter)
+ }
+ exitCode = details.ExitCode
+ }
+
+ command.Flags.ValidateDeprecations(deprecationTracker)
+ if deprecationTracker.DidTrackDeprecations() {
+ fmt.Fprintln(p.ErrWriter, deprecationTracker.DeprecationsReport())
+ }
+ p.Exiter(exitCode)
+ return
+ }()
+
+ args, additionalArgs := []string{}, []string{}
+
+ foundDelimiter := false
+ for _, arg := range osArgs[1:] {
+ if !foundDelimiter {
+ if arg == "--" {
+ foundDelimiter = true
+ continue
+ }
+ }
+
+ if foundDelimiter {
+ additionalArgs = append(additionalArgs, arg)
+ } else {
+ args = append(args, arg)
+ }
+ }
+
+ command = p.DefaultCommand
+ if len(args) > 0 {
+ p.handleHelpRequestsAndExit(p.OutWriter, args)
+ if command.Name == args[0] {
+ args = args[1:]
+ } else {
+ for _, deprecatedCommand := range p.DeprecatedCommands {
+ if deprecatedCommand.Name == args[0] {
+ deprecationTracker.TrackDeprecation(deprecatedCommand.Deprecation)
+ return
+ }
+ }
+ for _, tryCommand := range p.Commands {
+ if tryCommand.Name == args[0] {
+ command, args = tryCommand, args[1:]
+ break
+ }
+ }
+ }
+ }
+
+ command.Run(args, additionalArgs)
+}
+
+func (p Program) handleHelpRequestsAndExit(writer io.Writer, args []string) {
+ if len(args) == 0 {
+ return
+ }
+
+ matchesHelpFlag := func(args ...string) bool {
+ for _, arg := range args {
+ if arg == "--help" || arg == "-help" || arg == "-h" || arg == "--h" {
+ return true
+ }
+ }
+ return false
+ }
+ if len(args) == 1 {
+ if args[0] == "help" || matchesHelpFlag(args[0]) {
+ p.EmitUsage(writer)
+ Abort(AbortDetails{})
+ }
+ } else {
+ var name string
+ if args[0] == "help" || matchesHelpFlag(args[0]) {
+ name = args[1]
+ } else if matchesHelpFlag(args[1:]...) {
+ name = args[0]
+ } else {
+ return
+ }
+
+ if p.DefaultCommand.Name == name || p.Name == name {
+ p.DefaultCommand.EmitUsage(writer)
+ Abort(AbortDetails{})
+ }
+ for _, command := range p.Commands {
+ if command.Name == name {
+ command.EmitUsage(writer)
+ Abort(AbortDetails{})
+ }
+ }
+
+ fmt.Fprintln(writer, formatter.F("{{red}}Unknown Command: {{bold}}%s{{/}}", name))
+ fmt.Fprintln(writer, "")
+ p.EmitUsage(writer)
+ Abort(AbortDetails{ExitCode: 1})
+ }
+ return
+}
+
+func (p Program) EmitUsage(writer io.Writer) {
+ fmt.Fprintln(writer, formatter.F(p.Heading))
+ fmt.Fprintln(writer, formatter.F("{{gray}}%s{{/}}", strings.Repeat("-", len(p.Heading))))
+ fmt.Fprintln(writer, formatter.F("For usage information for a command, run {{bold}}%s help COMMAND{{/}}.", p.Name))
+ fmt.Fprintln(writer, formatter.F("For usage information for the default command, run {{bold}}%s help %s{{/}} or {{bold}}%s help %s{{/}}.", p.Name, p.Name, p.Name, p.DefaultCommand.Name))
+ fmt.Fprintln(writer, "")
+ fmt.Fprintln(writer, formatter.F("The following commands are available:"))
+
+ fmt.Fprintln(writer, formatter.Fi(1, "{{bold}}%s{{/}} or %s {{bold}}%s{{/}} - {{gray}}%s{{/}}", p.Name, p.Name, p.DefaultCommand.Name, p.DefaultCommand.Usage))
+ if p.DefaultCommand.ShortDoc != "" {
+ fmt.Fprintln(writer, formatter.Fi(2, p.DefaultCommand.ShortDoc))
+ }
+
+ for _, command := range p.Commands {
+ fmt.Fprintln(writer, formatter.Fi(1, "{{bold}}%s{{/}} - {{gray}}%s{{/}}", command.Name, command.Usage))
+ if command.ShortDoc != "" {
+ fmt.Fprintln(writer, formatter.Fi(2, command.ShortDoc))
+ }
+ }
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/boostrap_templates.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/boostrap_templates.go
new file mode 100644
index 000000000..a367a1fc9
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/boostrap_templates.go
@@ -0,0 +1,48 @@
+package generators
+
+var bootstrapText = `package {{.Package}}
+
+import (
+ "testing"
+
+ {{.GinkgoImport}}
+ {{.GomegaImport}}
+)
+
+func Test{{.FormattedName}}(t *testing.T) {
+ {{.GomegaPackage}}RegisterFailHandler({{.GinkgoPackage}}Fail)
+ {{.GinkgoPackage}}RunSpecs(t, "{{.FormattedName}} Suite")
+}
+`
+
+var agoutiBootstrapText = `package {{.Package}}
+
+import (
+ "testing"
+
+ {{.GinkgoImport}}
+ {{.GomegaImport}}
+ "github.com/sclevine/agouti"
+)
+
+func Test{{.FormattedName}}(t *testing.T) {
+ {{.GomegaPackage}}RegisterFailHandler({{.GinkgoPackage}}Fail)
+ {{.GinkgoPackage}}RunSpecs(t, "{{.FormattedName}} Suite")
+}
+
+var agoutiDriver *agouti.WebDriver
+
+var _ = {{.GinkgoPackage}}BeforeSuite(func() {
+ // Choose a WebDriver:
+
+ agoutiDriver = agouti.PhantomJS()
+ // agoutiDriver = agouti.Selenium()
+ // agoutiDriver = agouti.ChromeDriver()
+
+ {{.GomegaPackage}}Expect(agoutiDriver.Start()).To({{.GomegaPackage}}Succeed())
+})
+
+var _ = {{.GinkgoPackage}}AfterSuite(func() {
+ {{.GomegaPackage}}Expect(agoutiDriver.Stop()).To({{.GomegaPackage}}Succeed())
+})
+`
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go
new file mode 100644
index 000000000..b2dc59be6
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go
@@ -0,0 +1,133 @@
+package generators
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "os"
+ "text/template"
+
+ sprig "github.com/go-task/slim-sprig/v3"
+ "github.com/onsi/ginkgo/v2/ginkgo/command"
+ "github.com/onsi/ginkgo/v2/ginkgo/internal"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+func BuildBootstrapCommand() command.Command {
+ conf := GeneratorsConfig{}
+ flags, err := types.NewGinkgoFlagSet(
+ types.GinkgoFlags{
+ {Name: "agouti", KeyPath: "Agouti",
+ Usage: "If set, bootstrap will generate a bootstrap file for writing Agouti tests"},
+ {Name: "nodot", KeyPath: "NoDot",
+ Usage: "If set, bootstrap will generate a bootstrap test file that does not dot-import ginkgo and gomega"},
+ {Name: "internal", KeyPath: "Internal",
+ Usage: "If set, bootstrap will generate a bootstrap test file that uses the regular package name (i.e. `package X`, not `package X_test`)"},
+ {Name: "template", KeyPath: "CustomTemplate",
+ UsageArgument: "template-file",
+ Usage: "If specified, generate will use the contents of the file passed as the bootstrap template"},
+ {Name: "template-data", KeyPath: "CustomTemplateData",
+ UsageArgument: "template-data-file",
+ Usage: "If specified, generate will use the contents of the file passed as data to be rendered in the bootstrap template"},
+ },
+ &conf,
+ types.GinkgoFlagSections{},
+ )
+
+ if err != nil {
+ panic(err)
+ }
+
+ return command.Command{
+ Name: "bootstrap",
+ Usage: "ginkgo bootstrap",
+ ShortDoc: "Bootstrap a test suite for the current package",
+ Documentation: `Tests written in Ginkgo and Gomega require a small amount of boilerplate to hook into Go's testing infrastructure.
+
+{{bold}}ginkgo bootstrap{{/}} generates this boilerplate for you in a file named X_suite_test.go where X is the name of the package under test.`,
+ DocLink: "generators",
+ Flags: flags,
+ Command: func(_ []string, _ []string) {
+ generateBootstrap(conf)
+ },
+ }
+}
+
+type bootstrapData struct {
+ Package string
+ FormattedName string
+
+ GinkgoImport string
+ GomegaImport string
+ GinkgoPackage string
+ GomegaPackage string
+ CustomData map[string]any
+}
+
+func generateBootstrap(conf GeneratorsConfig) {
+ packageName, bootstrapFilePrefix, formattedName := getPackageAndFormattedName()
+
+ data := bootstrapData{
+ Package: determinePackageName(packageName, conf.Internal),
+ FormattedName: formattedName,
+
+ GinkgoImport: `. "github.com/onsi/ginkgo/v2"`,
+ GomegaImport: `. "github.com/onsi/gomega"`,
+ GinkgoPackage: "",
+ GomegaPackage: "",
+ }
+
+ if conf.NoDot {
+ data.GinkgoImport = `"github.com/onsi/ginkgo/v2"`
+ data.GomegaImport = `"github.com/onsi/gomega"`
+ data.GinkgoPackage = `ginkgo.`
+ data.GomegaPackage = `gomega.`
+ }
+
+ targetFile := fmt.Sprintf("%s_suite_test.go", bootstrapFilePrefix)
+ if internal.FileExists(targetFile) {
+ command.AbortWith("{{bold}}%s{{/}} already exists", targetFile)
+ } else {
+ fmt.Printf("Generating ginkgo test suite bootstrap for %s in:\n\t%s\n", packageName, targetFile)
+ }
+
+ f, err := os.Create(targetFile)
+ command.AbortIfError("Failed to create file:", err)
+ defer f.Close()
+
+ var templateText string
+ if conf.CustomTemplate != "" {
+ tpl, err := os.ReadFile(conf.CustomTemplate)
+ command.AbortIfError("Failed to read custom bootstrap file:", err)
+ templateText = string(tpl)
+ if conf.CustomTemplateData != "" {
+ var tplCustomDataMap map[string]any
+ tplCustomData, err := os.ReadFile(conf.CustomTemplateData)
+ command.AbortIfError("Failed to read custom bootstrap data file:", err)
+ if !json.Valid([]byte(tplCustomData)) {
+ command.AbortWith("Invalid JSON object in custom data file.")
+ }
+ //create map from the custom template data
+ json.Unmarshal(tplCustomData, &tplCustomDataMap)
+ data.CustomData = tplCustomDataMap
+ }
+ } else if conf.Agouti {
+ templateText = agoutiBootstrapText
+ } else {
+ templateText = bootstrapText
+ }
+
+ // Configure the template to fail explicitly when rendering accesses a missing key.
+ bootstrapTemplate, err := template.New("bootstrap").Funcs(sprig.TxtFuncMap()).Option("missingkey=error").Parse(templateText)
+ command.AbortIfError("Failed to parse bootstrap template:", err)
+
+ buf := &bytes.Buffer{}
+ // Fail early, while rendering the template with the custom data,
+ // rather than later during the go fmt step.
+ err = bootstrapTemplate.Execute(buf, data)
+ command.AbortIfError("Failed to render bootstrap template:", err)
+
+ buf.WriteTo(f)
+
+ internal.GoFmt(targetFile)
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go
new file mode 100644
index 000000000..cf3b7cb6d
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go
@@ -0,0 +1,265 @@
+package generators
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "text/template"
+
+ sprig "github.com/go-task/slim-sprig/v3"
+ "github.com/onsi/ginkgo/v2/ginkgo/command"
+ "github.com/onsi/ginkgo/v2/ginkgo/internal"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+func BuildGenerateCommand() command.Command {
+ conf := GeneratorsConfig{}
+ flags, err := types.NewGinkgoFlagSet(
+ types.GinkgoFlags{
+ {Name: "agouti", KeyPath: "Agouti",
+ Usage: "If set, generate will create a test file for writing Agouti tests"},
+ {Name: "nodot", KeyPath: "NoDot",
+ Usage: "If set, generate will create a test file that does not dot-import ginkgo and gomega"},
+ {Name: "internal", KeyPath: "Internal",
+ Usage: "If set, generate will create a test file that uses the regular package name (i.e. `package X`, not `package X_test`)"},
+ {Name: "template", KeyPath: "CustomTemplate",
+ UsageArgument: "template-file",
+ Usage: "If specified, generate will use the contents of the file passed as the test file template"},
+ {Name: "template-data", KeyPath: "CustomTemplateData",
+ UsageArgument: "template-data-file",
+ Usage: "If specified, generate will use the contents of the file passed as data to be rendered in the test file template"},
+ {Name: "tags", KeyPath: "Tags",
+ UsageArgument: "build-tags",
+ Usage: "If specified, generate will create a test file that uses the given build tags (i.e. `--tags e2e,!unit` will add `//go:build e2e,!unit`)"},
+ },
+ &conf,
+ types.GinkgoFlagSections{},
+ )
+
+ if err != nil {
+ panic(err)
+ }
+
+ return command.Command{
+ Name: "generate",
+ Usage: "ginkgo generate <filename>",
+ ShortDoc: "Generate a test file named <filename>_test.go",
+ Documentation: `If the optional <filename> argument is omitted, a file named after the package in the current directory will be created.
+
+You can pass multiple <filename> arguments to generate multiple files simultaneously. The resulting files are named <filename>_test.go.
+
+You can also pass a <filename> of the form "file.go" and generate will emit "file_test.go".`,
+ DocLink: "generators",
+ Flags: flags,
+ Command: func(args []string, _ []string) {
+ generateTestFiles(conf, args)
+ },
+ }
+}
+
+type specData struct {
+ BuildTags string
+ Package string
+ Subject string
+ PackageImportPath string
+ ImportPackage bool
+
+ GinkgoImport string
+ GomegaImport string
+ GinkgoPackage string
+ GomegaPackage string
+ CustomData map[string]any
+}
+
+func generateTestFiles(conf GeneratorsConfig, args []string) {
+ subjects := args
+ if len(subjects) == 0 {
+ subjects = []string{""}
+ }
+ for _, subject := range subjects {
+ generateTestFileForSubject(subject, conf)
+ }
+}
+
+func generateTestFileForSubject(subject string, conf GeneratorsConfig) {
+ packageName, specFilePrefix, formattedName := getPackageAndFormattedName()
+ if subject != "" {
+ specFilePrefix = formatSubject(subject)
+ formattedName = prettifyName(specFilePrefix)
+ }
+
+ if conf.Internal {
+ specFilePrefix = specFilePrefix + "_internal"
+ }
+
+ data := specData{
+ BuildTags: getBuildTags(conf.Tags),
+ Package: determinePackageName(packageName, conf.Internal),
+ Subject: formattedName,
+ PackageImportPath: getPackageImportPath(),
+ ImportPackage: !conf.Internal,
+
+ GinkgoImport: `. "github.com/onsi/ginkgo/v2"`,
+ GomegaImport: `. "github.com/onsi/gomega"`,
+ GinkgoPackage: "",
+ GomegaPackage: "",
+ }
+
+ if conf.NoDot {
+ data.GinkgoImport = `"github.com/onsi/ginkgo/v2"`
+ data.GomegaImport = `"github.com/onsi/gomega"`
+ data.GinkgoPackage = `ginkgo.`
+ data.GomegaPackage = `gomega.`
+ }
+
+ targetFile := fmt.Sprintf("%s_test.go", specFilePrefix)
+ if internal.FileExists(targetFile) {
+ command.AbortWith("{{bold}}%s{{/}} already exists", targetFile)
+ } else {
+ fmt.Printf("Generating ginkgo test for %s in:\n %s\n", data.Subject, targetFile)
+ }
+
+ f, err := os.Create(targetFile)
+ command.AbortIfError("Failed to create test file:", err)
+ defer f.Close()
+
+ var templateText string
+ if conf.CustomTemplate != "" {
+ tpl, err := os.ReadFile(conf.CustomTemplate)
+ command.AbortIfError("Failed to read custom template file:", err)
+ templateText = string(tpl)
+ if conf.CustomTemplateData != "" {
+ var tplCustomDataMap map[string]any
+ tplCustomData, err := os.ReadFile(conf.CustomTemplateData)
+ command.AbortIfError("Failed to read custom template data file:", err)
+ if !json.Valid([]byte(tplCustomData)) {
+ command.AbortWith("Invalid JSON object in custom data file.")
+ }
+ //create map from the custom template data
+ json.Unmarshal(tplCustomData, &tplCustomDataMap)
+ data.CustomData = tplCustomDataMap
+ }
+ } else if conf.Agouti {
+ templateText = agoutiSpecText
+ } else {
+ templateText = specText
+ }
+
+ // Configure the template to fail explicitly when rendering accesses a missing key.
+ specTemplate, err := template.New("spec").Funcs(sprig.TxtFuncMap()).Option("missingkey=error").Parse(templateText)
+ command.AbortIfError("Failed to parse test template:", err)
+
+ // Fail early, while rendering the template with the custom data,
+ // rather than later during the go fmt step.
+ err = specTemplate.Execute(f, data)
+ command.AbortIfError("Failed to render test template:", err)
+ internal.GoFmt(targetFile)
+}
+
+func formatSubject(name string) string {
+ name = strings.ReplaceAll(name, "-", "_")
+ name = strings.ReplaceAll(name, " ", "_")
+ name = strings.Split(name, ".go")[0]
+ name = strings.Split(name, "_test")[0]
+ return name
+}
+
+// moduleName returns module name from go.mod from given module root directory
+func moduleName(modRoot string) string {
+ modFile, err := os.Open(filepath.Join(modRoot, "go.mod"))
+ if err != nil {
+ return ""
+ }
+ defer modFile.Close()
+
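+ // Only the first 128 bytes of go.mod are inspected: the module directive
+ // is expected to appear on one of the first few lines.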
+ mod := make([]byte, 128)
+ _, err = modFile.Read(mod)
+ if err != nil {
+ return ""
+ }
+
+ slashSlash := []byte("//")
+ moduleStr := []byte("module")
+
+ for len(mod) > 0 {
+ line := mod
+ mod = nil
+ if i := bytes.IndexByte(line, '\n'); i >= 0 {
+ line, mod = line[:i], line[i+1:]
+ }
+ if i := bytes.Index(line, slashSlash); i >= 0 {
+ line = line[:i]
+ }
+ line = bytes.TrimSpace(line)
+ if !bytes.HasPrefix(line, moduleStr) {
+ continue
+ }
+ line = line[len(moduleStr):]
+ n := len(line)
+ line = bytes.TrimSpace(line)
+ if len(line) == n || len(line) == 0 {
+ continue
+ }
+
+ if line[0] == '"' || line[0] == '`' {
+ p, err := strconv.Unquote(string(line))
+ if err != nil {
+ return "" // malformed quoted string or multiline module path
+ }
+ return p
+ }
+
+ return string(line)
+ }
+
+ return "" // missing module path
+}
+
+func findModuleRoot(dir string) (root string) {
+ dir = filepath.Clean(dir)
+
+ // Look for enclosing go.mod.
+ for {
+ if fi, err := os.Stat(filepath.Join(dir, "go.mod")); err == nil && !fi.IsDir() {
+ return dir
+ }
+ d := filepath.Dir(dir)
+ if d == dir {
+ break
+ }
+ dir = d
+ }
+ return ""
+}
+
+func getPackageImportPath() string {
+ workingDir, err := os.Getwd()
+ if err != nil {
+ panic(err.Error())
+ }
+
+ sep := string(filepath.Separator)
+
+ // Try go.mod file first
+ modRoot := findModuleRoot(workingDir)
+ if modRoot != "" {
+ modName := moduleName(modRoot)
+ if modName != "" {
+ cd := strings.ReplaceAll(workingDir, modRoot, "")
+ cd = strings.ReplaceAll(cd, sep, "/")
+ return modName + cd
+ }
+ }
+
+ // Fallback to GOPATH structure
+ paths := strings.Split(workingDir, sep+"src"+sep)
+ if len(paths) == 1 {
+ fmt.Printf("\nCouldn't identify package import path.\n\n\tginkgo generate\n\nMust be run within a package directory under $GOPATH/src/...\nYou're going to have to change UNKNOWN_PACKAGE_PATH in the generated file...\n\n")
+ return "UNKNOWN_PACKAGE_PATH"
+ }
+ return filepath.ToSlash(paths[len(paths)-1])
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go
new file mode 100644
index 000000000..4dab07d03
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go
@@ -0,0 +1,43 @@
+package generators
+
+var specText = `{{.BuildTags}}
+package {{.Package}}
+
+import (
+ {{.GinkgoImport}}
+ {{.GomegaImport}}
+
+ {{if .ImportPackage}}"{{.PackageImportPath}}"{{end}}
+)
+
+var _ = {{.GinkgoPackage}}Describe("{{.Subject}}", func() {
+
+})
+`
+
+var agoutiSpecText = `{{.BuildTags}}
+package {{.Package}}
+
+import (
+ {{.GinkgoImport}}
+ {{.GomegaImport}}
+ "github.com/sclevine/agouti"
+ . "github.com/sclevine/agouti/matchers"
+
+ {{if .ImportPackage}}"{{.PackageImportPath}}"{{end}}
+)
+
+var _ = {{.GinkgoPackage}}Describe("{{.Subject}}", func() {
+ var page *agouti.Page
+
+ {{.GinkgoPackage}}BeforeEach(func() {
+ var err error
+ page, err = agoutiDriver.NewPage()
+ {{.GomegaPackage}}Expect(err).NotTo({{.GomegaPackage}}HaveOccurred())
+ })
+
+ {{.GinkgoPackage}}AfterEach(func() {
+ {{.GomegaPackage}}Expect(page.Destroy()).To({{.GomegaPackage}}Succeed())
+ })
+})
+`
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go
new file mode 100644
index 000000000..28c7aa6f4
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go
@@ -0,0 +1,76 @@
+package generators
+
+import (
+ "fmt"
+ "go/build"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "github.com/onsi/ginkgo/v2/ginkgo/command"
+)
+
+type GeneratorsConfig struct {
+ Agouti, NoDot, Internal bool
+ CustomTemplate string
+ CustomTemplateData string
+ Tags string
+}
+
+func getPackageAndFormattedName() (string, string, string) {
+ path, err := os.Getwd()
+ command.AbortIfError("Could not get current working directory:", err)
+
+ dirName := strings.ReplaceAll(filepath.Base(path), "-", "_")
+ dirName = strings.ReplaceAll(dirName, " ", "_")
+
+ pkg, err := build.ImportDir(path, 0)
+ packageName := pkg.Name
+ if err != nil {
+ packageName = ensureLegalPackageName(dirName)
+ }
+
+ formattedName := prettifyName(filepath.Base(path))
+ return packageName, dirName, formattedName
+}
+
+func ensureLegalPackageName(name string) string {
+ if name == "_" {
+ return "underscore"
+ }
+ if len(name) == 0 {
+ return "empty"
+ }
+ n, isDigitErr := strconv.Atoi(string(name[0]))
+ if isDigitErr == nil {
+ return []string{"zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"}[n] + name[1:]
+ }
+ return name
+}
+
+func prettifyName(name string) string {
+ name = strings.ReplaceAll(name, "-", " ")
+ name = strings.ReplaceAll(name, "_", " ")
+ name = strings.Title(name)
+ name = strings.ReplaceAll(name, " ", "")
+ return name
+}
+
+func determinePackageName(name string, internal bool) string {
+ if internal {
+ return name
+ }
+
+ return name + "_test"
+}
+
+// getBuildTags returns the resultant string to be added.
+// If the input string is not empty, then returns a `//go:build {}` string,
+// otherwise returns an empty string.
+func getBuildTags(tags string) string {
+ if tags != "" {
+ return fmt.Sprintf("//go:build %s\n", tags)
+ }
+ return ""
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go
new file mode 100644
index 000000000..48827cc5e
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go
@@ -0,0 +1,173 @@
+package internal
+
+import (
+ "fmt"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+ "sync"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
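+// CompileSuite compiles the suite's test binary via the go toolchain.
+// Already-compiled suites are returned unchanged; build failures and
+// packages with no test files are recorded on the returned TestSuite's
+// State rather than aborting the CLI.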
+func CompileSuite(suite TestSuite, goFlagsConfig types.GoFlagsConfig) TestSuite {
+ if suite.PathToCompiledTest != "" {
+ return suite
+ }
+
+ suite.CompilationError = nil
+
+ path, err := filepath.Abs(filepath.Join(suite.Path, suite.PackageName+".test"))
+ if err != nil {
+ suite.State = TestSuiteStateFailedToCompile
+ suite.CompilationError = fmt.Errorf("Failed to compute compilation target path:\n%s", err.Error())
+ return suite
+ }
+
+ if len(goFlagsConfig.O) > 0 {
+ userDefinedPath, err := filepath.Abs(goFlagsConfig.O)
+ if err != nil {
+ suite.State = TestSuiteStateFailedToCompile
+ suite.CompilationError = fmt.Errorf("Failed to compute compilation target path %s:\n%s", goFlagsConfig.O, err.Error())
+ return suite
+ }
+ path = userDefinedPath
+ }
+
+ goFlagsConfig.O = path
+
+ ginkgoInvocationPath, _ := os.Getwd()
+ ginkgoInvocationPath, _ = filepath.Abs(ginkgoInvocationPath)
+ packagePath := suite.AbsPath()
+ pathToInvocationPath, err := filepath.Rel(packagePath, ginkgoInvocationPath)
+ if err != nil {
+ suite.State = TestSuiteStateFailedToCompile
+ suite.CompilationError = fmt.Errorf("Failed to get relative path from package to the current working directory:\n%s", err.Error())
+ return suite
+ }
+ args, err := types.GenerateGoTestCompileArgs(goFlagsConfig, "./", pathToInvocationPath)
+ if err != nil {
+ suite.State = TestSuiteStateFailedToCompile
+ suite.CompilationError = fmt.Errorf("Failed to generate go test compile flags:\n%s", err.Error())
+ return suite
+ }
+
+ cmd := exec.Command("go", args...)
+ cmd.Dir = suite.Path
+ output, err := cmd.CombinedOutput()
+ if err != nil {
+ if len(output) > 0 {
+ suite.State = TestSuiteStateFailedToCompile
+ suite.CompilationError = fmt.Errorf("Failed to compile %s:\n\n%s", suite.PackageName, output)
+ } else {
+ suite.State = TestSuiteStateFailedToCompile
+ suite.CompilationError = fmt.Errorf("Failed to compile %s\n%s", suite.PackageName, err.Error())
+ }
+ return suite
+ }
+
+ if strings.Contains(string(output), "[no test files]") {
+ suite.State = TestSuiteStateSkippedDueToEmptyCompilation
+ return suite
+ }
+
+ if len(output) > 0 {
+ fmt.Println(string(output))
+ }
+
+ if !FileExists(path) {
+ suite.State = TestSuiteStateFailedToCompile
+ suite.CompilationError = fmt.Errorf("Failed to compile %s:\nOutput file %s could not be found", suite.PackageName, path)
+ return suite
+ }
+
+ suite.State = TestSuiteStateCompiled
+ suite.PathToCompiledTest = path
+ return suite
+}
+
+func Cleanup(goFlagsConfig types.GoFlagsConfig, suites ...TestSuite) {
+ if goFlagsConfig.BinaryMustBePreserved() {
+ return
+ }
+ for _, suite := range suites {
+ if !suite.Precompiled {
+ os.Remove(suite.PathToCompiledTest)
+ }
+ }
+}
+
+type parallelSuiteBundle struct {
+ suite TestSuite
+ compiled chan TestSuite
+}
+
+type OrderedParallelCompiler struct {
+ mutex *sync.Mutex
+ stopped bool
+ numCompilers int
+
+ idx int
+ numSuites int
+ completionChannels []chan TestSuite
+}
+
+func NewOrderedParallelCompiler(numCompilers int) *OrderedParallelCompiler {
+ return &OrderedParallelCompiler{
+ mutex: &sync.Mutex{},
+ numCompilers: numCompilers,
+ }
+}
+
+func (opc *OrderedParallelCompiler) StartCompiling(suites TestSuites, goFlagsConfig types.GoFlagsConfig) {
+ opc.stopped = false
+ opc.idx = 0
+ opc.numSuites = len(suites)
+ opc.completionChannels = make([]chan TestSuite, opc.numSuites)
+
+ toCompile := make(chan parallelSuiteBundle, opc.numCompilers)
+ for compiler := 0; compiler < opc.numCompilers; compiler++ {
+ go func() {
+ for bundle := range toCompile {
+ c, suite := bundle.compiled, bundle.suite
+ opc.mutex.Lock()
+ stopped := opc.stopped
+ opc.mutex.Unlock()
+ if !stopped {
+ suite = CompileSuite(suite, goFlagsConfig)
+ }
+ c <- suite
+ }
+ }()
+ }
+
+ for idx, suite := range suites {
+ opc.completionChannels[idx] = make(chan TestSuite, 1)
+ toCompile <- parallelSuiteBundle{suite, opc.completionChannels[idx]}
+ if idx == 0 { //compile first suite serially
+ suite = <-opc.completionChannels[0]
+ opc.completionChannels[0] <- suite
+ }
+ }
+
+ close(toCompile)
+}
+
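+// Next blocks until the next suite, in the order the suites were submitted,
+// has finished compiling, then returns its index and result. Once every
+// suite has been returned it yields (numSuites, TestSuite{}) as a sentinel.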
+func (opc *OrderedParallelCompiler) Next() (int, TestSuite) {
+ if opc.idx >= opc.numSuites {
+ return opc.numSuites, TestSuite{}
+ }
+
+ idx := opc.idx
+ suite := <-opc.completionChannels[idx]
+ opc.idx = opc.idx + 1
+
+ return idx, suite
+}
+
+func (opc *OrderedParallelCompiler) StopAndDrain() {
+ opc.mutex.Lock()
+ opc.stopped = true
+ opc.mutex.Unlock()
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go
new file mode 100644
index 000000000..3c5079ff4
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go
@@ -0,0 +1,129 @@
+// Copyright (c) 2015, Wade Simmons
+// All rights reserved.
+
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+
+// 1. Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Package gocovmerge takes the results from multiple `go test -coverprofile`
+// runs and merges them into one profile
+
+// this file was originally taken from the gocovmerge project
+// see also: https://go.shabbyrobe.org/gocovmerge
+package internal
+
+import (
+ "fmt"
+ "io"
+ "sort"
+
+ "golang.org/x/tools/cover"
+)
+
+func AddCoverProfile(profiles []*cover.Profile, p *cover.Profile) []*cover.Profile {
+ i := sort.Search(len(profiles), func(i int) bool { return profiles[i].FileName >= p.FileName })
+ if i < len(profiles) && profiles[i].FileName == p.FileName {
+ MergeCoverProfiles(profiles[i], p)
+ } else {
+ profiles = append(profiles, nil)
+ copy(profiles[i+1:], profiles[i:])
+ profiles[i] = p
+ }
+ return profiles
+}
+
+func DumpCoverProfiles(profiles []*cover.Profile, out io.Writer) error {
+ if len(profiles) == 0 {
+ return nil
+ }
+ if _, err := fmt.Fprintf(out, "mode: %s\n", profiles[0].Mode); err != nil {
+ return err
+ }
+ for _, p := range profiles {
+ for _, b := range p.Blocks {
+ if _, err := fmt.Fprintf(out, "%s:%d.%d,%d.%d %d %d\n", p.FileName, b.StartLine, b.StartCol, b.EndLine, b.EndCol, b.NumStmt, b.Count); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func MergeCoverProfiles(into *cover.Profile, merge *cover.Profile) error {
+ if into.Mode != merge.Mode {
+ return fmt.Errorf("cannot merge profiles with different modes")
+ }
+ // Since the blocks are sorted, we can keep track of where the last block
+ // was inserted and only look at the blocks after that as targets for merge
+ startIndex := 0
+ for _, b := range merge.Blocks {
+ var err error
+ startIndex, err = mergeProfileBlock(into, b, startIndex)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func mergeProfileBlock(p *cover.Profile, pb cover.ProfileBlock, startIndex int) (int, error) {
+ sortFunc := func(i int) bool {
+ pi := p.Blocks[i+startIndex]
+ return pi.StartLine >= pb.StartLine && (pi.StartLine != pb.StartLine || pi.StartCol >= pb.StartCol)
+ }
+
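+ // Locate the first existing block at or after pb's start position; both
+ // block lists are sorted, so the search can begin at startIndex.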
+ i := 0
+ if !sortFunc(i) {
+ i = sort.Search(len(p.Blocks)-startIndex, sortFunc)
+ }
+
+ i += startIndex
+ if i < len(p.Blocks) && p.Blocks[i].StartLine == pb.StartLine && p.Blocks[i].StartCol == pb.StartCol {
+ if p.Blocks[i].EndLine != pb.EndLine || p.Blocks[i].EndCol != pb.EndCol {
+ return i, fmt.Errorf("gocovmerge: overlapping merge %v %v %v", p.FileName, p.Blocks[i], pb)
+ }
+ switch p.Mode {
+ case "set":
+ p.Blocks[i].Count |= pb.Count
+ case "count", "atomic":
+ p.Blocks[i].Count += pb.Count
+ default:
+ return i, fmt.Errorf("gocovmerge: unsupported covermode '%s'", p.Mode)
+ }
+
+ } else {
+ if i > 0 {
+ pa := p.Blocks[i-1]
+ if pa.EndLine >= pb.EndLine && (pa.EndLine != pb.EndLine || pa.EndCol > pb.EndCol) {
+ return i, fmt.Errorf("gocovmerge: overlap before %v %v %v", p.FileName, pa, pb)
+ }
+ }
+ if i < len(p.Blocks)-1 {
+ pa := p.Blocks[i+1]
+ if pa.StartLine <= pb.StartLine && (pa.StartLine != pb.StartLine || pa.StartCol < pb.StartCol) {
+ return i, fmt.Errorf("gocovmerge: overlap after %v %v %v", p.FileName, pa, pb)
+ }
+ }
+ p.Blocks = append(p.Blocks, cover.ProfileBlock{})
+ copy(p.Blocks[i+1:], p.Blocks[i:])
+ p.Blocks[i] = pb
+ }
+
+ return i + 1, nil
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go
new file mode 100644
index 000000000..8e16d2bb0
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go
@@ -0,0 +1,227 @@
+package internal
+
+import (
+ "fmt"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "regexp"
+ "strconv"
+
+ "github.com/google/pprof/profile"
+ "github.com/onsi/ginkgo/v2/reporters"
+ "github.com/onsi/ginkgo/v2/types"
+ "golang.org/x/tools/cover"
+)
+
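+// AbsPathForGeneratedAsset computes where a generated asset (profile or
+// report) for the given suite and parallel process should live, honoring
+// the configured output directory when one is set.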
+func AbsPathForGeneratedAsset(assetName string, suite TestSuite, cliConfig types.CLIConfig, process int) string {
+ suffix := ""
+ if process != 0 {
+ suffix = fmt.Sprintf(".%d", process)
+ }
+ if cliConfig.OutputDir == "" {
+ return filepath.Join(suite.AbsPath(), assetName+suffix)
+ }
+ outputDir, _ := filepath.Abs(cliConfig.OutputDir)
+ return filepath.Join(outputDir, suite.NamespacedName()+"_"+assetName+suffix)
+}
+
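+// FinalizeProfilesAndReportsForSuites merges cover profiles and reports
+// across suites (unless asked to keep them separate), copies preserved test
+// binaries into the output directory, and emits reports for suites that
+// never ran.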
+func FinalizeProfilesAndReportsForSuites(suites TestSuites, cliConfig types.CLIConfig, suiteConfig types.SuiteConfig, reporterConfig types.ReporterConfig, goFlagsConfig types.GoFlagsConfig) ([]string, error) {
+ messages := []string{}
+ suitesWithProfiles := suites.WithState(TestSuiteStatePassed, TestSuiteStateFailed) //anything else won't have actually run and generated a profile
+
+ // merge cover profiles if need be
+ if goFlagsConfig.Cover && !cliConfig.KeepSeparateCoverprofiles {
+ coverProfiles := []string{}
+ for _, suite := range suitesWithProfiles {
+ if !suite.HasProgrammaticFocus {
+ coverProfiles = append(coverProfiles, AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, 0))
+ }
+ }
+
+ if len(coverProfiles) > 0 {
+ dst := goFlagsConfig.CoverProfile
+ if cliConfig.OutputDir != "" {
+ dst = filepath.Join(cliConfig.OutputDir, goFlagsConfig.CoverProfile)
+ }
+ err := MergeAndCleanupCoverProfiles(coverProfiles, dst)
+ if err != nil {
+ return messages, err
+ }
+ coverage, err := GetCoverageFromCoverProfile(dst)
+ if err != nil {
+ return messages, err
+ }
+ if coverage == 0 {
+ messages = append(messages, "composite coverage: [no statements]")
+ } else if suitesWithProfiles.AnyHaveProgrammaticFocus() {
+ messages = append(messages, fmt.Sprintf("composite coverage: %.1f%% of statements; however, some suites did not contribute because they included programmatically focused specs", coverage))
+ } else {
+ messages = append(messages, fmt.Sprintf("composite coverage: %.1f%% of statements", coverage))
+ }
+ } else {
+ messages = append(messages, "no composite coverage computed: all suites included programmatically focused specs")
+ }
+ }
+
+ // copy binaries if need be
+ for _, suite := range suitesWithProfiles {
+ if goFlagsConfig.BinaryMustBePreserved() && cliConfig.OutputDir != "" {
+ src := suite.PathToCompiledTest
+ dst := filepath.Join(cliConfig.OutputDir, suite.NamespacedName()+".test")
+ if suite.Precompiled {
+ if err := CopyFile(src, dst); err != nil {
+ return messages, err
+ }
+ } else {
+ if err := os.Rename(src, dst); err != nil {
+ return messages, err
+ }
+ }
+ }
+ }
+
+ type reportFormat struct {
+ ReportName string
+ GenerateFunc func(types.Report, string) error
+ MergeFunc func([]string, string) ([]string, error)
+ }
+ reportFormats := []reportFormat{}
+ if reporterConfig.JSONReport != "" {
+ reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.JSONReport, GenerateFunc: reporters.GenerateJSONReport, MergeFunc: reporters.MergeAndCleanupJSONReports})
+ }
+ if reporterConfig.JUnitReport != "" {
+ reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.JUnitReport, GenerateFunc: reporters.GenerateJUnitReport, MergeFunc: reporters.MergeAndCleanupJUnitReports})
+ }
+ if reporterConfig.TeamcityReport != "" {
+ reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.TeamcityReport, GenerateFunc: reporters.GenerateTeamcityReport, MergeFunc: reporters.MergeAndCleanupTeamcityReports})
+ }
+
+ // Generate reports for suites that failed to run
+ reportableSuites := suites.ThatAreGinkgoSuites()
+ for _, suite := range reportableSuites.WithState(TestSuiteStateFailedToCompile, TestSuiteStateFailedDueToTimeout, TestSuiteStateSkippedDueToPriorFailures, TestSuiteStateSkippedDueToEmptyCompilation) {
+ report := types.Report{
+ SuitePath: suite.AbsPath(),
+ SuiteConfig: suiteConfig,
+ SuiteSucceeded: false,
+ }
+ switch suite.State {
+ case TestSuiteStateFailedToCompile:
+ report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, suite.CompilationError.Error())
+ case TestSuiteStateFailedDueToTimeout:
+ report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, TIMEOUT_ELAPSED_FAILURE_REASON)
+ case TestSuiteStateSkippedDueToPriorFailures:
+ report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, PRIOR_FAILURES_FAILURE_REASON)
+ case TestSuiteStateSkippedDueToEmptyCompilation:
+ report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, EMPTY_SKIP_FAILURE_REASON)
+ report.SuiteSucceeded = true
+ }
+
+ for _, format := range reportFormats {
+ format.GenerateFunc(report, AbsPathForGeneratedAsset(format.ReportName, suite, cliConfig, 0))
+ }
+ }
+
+ // Merge reports unless we've been asked to keep them separate
+ if !cliConfig.KeepSeparateReports {
+ for _, format := range reportFormats {
+ reports := []string{}
+ for _, suite := range reportableSuites {
+ reports = append(reports, AbsPathForGeneratedAsset(format.ReportName, suite, cliConfig, 0))
+ }
+ dst := format.ReportName
+ if cliConfig.OutputDir != "" {
+ dst = filepath.Join(cliConfig.OutputDir, format.ReportName)
+ }
+ mergeMessages, err := format.MergeFunc(reports, dst)
+ messages = append(messages, mergeMessages...)
+ if err != nil {
+ return messages, err
+ }
+ }
+ }
+
+ return messages, nil
+}
+
+// loads each profile, merges them, deletes them, stores them in destination
+func MergeAndCleanupCoverProfiles(profiles []string, destination string) error {
+ var merged []*cover.Profile
+ for _, file := range profiles {
+ parsedProfiles, err := cover.ParseProfiles(file)
+ if err != nil {
+ return err
+ }
+ os.Remove(file)
+ for _, p := range parsedProfiles {
+ merged = AddCoverProfile(merged, p)
+ }
+ }
+ dst, err := os.OpenFile(destination, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
+ if err != nil {
+ return err
+ }
+ defer dst.Close()
+ err = DumpCoverProfiles(merged, dst)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func GetCoverageFromCoverProfile(profile string) (float64, error) {
+ cmd := exec.Command("go", "tool", "cover", "-func", profile)
+ output, err := cmd.CombinedOutput()
+ if err != nil {
+ return 0, fmt.Errorf("Could not process Coverprofile %s: %s - %s", profile, err.Error(), string(output))
+ }
+ re := regexp.MustCompile(`total:\s*\(statements\)\s*(\d*\.\d*)\%`)
+ matches := re.FindStringSubmatch(string(output))
+ if matches == nil {
+ return 0, fmt.Errorf("Could not parse Coverprofile to compute coverage percentage")
+ }
+ coverageString := matches[1]
+ coverage, err := strconv.ParseFloat(coverageString, 64)
+ if err != nil {
+ return 0, fmt.Errorf("Could not parse Coverprofile to compute coverage percentage: %s", err.Error())
+ }
+
+ return coverage, nil
+}
+
+func MergeProfiles(profilePaths []string, destination string) error {
+ profiles := []*profile.Profile{}
+ for _, profilePath := range profilePaths {
+ proFile, err := os.Open(profilePath)
+ if err != nil {
+ return fmt.Errorf("Could not open profile: %s\n%s", profilePath, err.Error())
+ }
+ prof, err := profile.Parse(proFile)
+ _ = proFile.Close()
+ if err != nil {
+ return fmt.Errorf("Could not parse profile: %s\n%s", profilePath, err.Error())
+ }
+ profiles = append(profiles, prof)
+ os.Remove(profilePath)
+ }
+
+ mergedProfile, err := profile.Merge(profiles)
+ if err != nil {
+ return fmt.Errorf("Could not merge profiles:\n%s", err.Error())
+ }
+
+ outFile, err := os.Create(destination)
+ if err != nil {
+ return fmt.Errorf("Could not create merged profile %s:\n%s", destination, err.Error())
+ }
+ err = mergedProfile.Write(outFile)
+ if err != nil {
+ return fmt.Errorf("Could not write merged profile %s:\n%s", destination, err.Error())
+ }
+ err = outFile.Close()
+ if err != nil {
+ return fmt.Errorf("Could not close merged profile %s:\n%s", destination, err.Error())
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go
new file mode 100644
index 000000000..41052ea19
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go
@@ -0,0 +1,355 @@
+package internal
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "regexp"
+ "strings"
+ "syscall"
+ "time"
+
+ "github.com/onsi/ginkgo/v2/formatter"
+ "github.com/onsi/ginkgo/v2/ginkgo/command"
+ "github.com/onsi/ginkgo/v2/internal/parallel_support"
+ "github.com/onsi/ginkgo/v2/reporters"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
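+// RunCompiledSuite executes a compiled suite, dispatching to parallel or
+// serial Ginkgo runs (or a plain go test run for non-Ginkgo suites) and
+// firing the after-run hook when one is configured.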
+func RunCompiledSuite(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig types.ReporterConfig, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig, additionalArgs []string) TestSuite {
+ suite.State = TestSuiteStateFailed
+ suite.HasProgrammaticFocus = false
+
+ if suite.PathToCompiledTest == "" {
+ return suite
+ }
+
+ if suite.IsGinkgo && cliConfig.ComputedProcs() > 1 {
+ suite = runParallel(suite, ginkgoConfig, reporterConfig, cliConfig, goFlagsConfig, additionalArgs)
+ } else if suite.IsGinkgo {
+ suite = runSerial(suite, ginkgoConfig, reporterConfig, cliConfig, goFlagsConfig, additionalArgs)
+ } else {
+ suite = runGoTest(suite, cliConfig, goFlagsConfig)
+ }
+ runAfterRunHook(cliConfig.AfterRunHook, reporterConfig.NoColor, suite)
+ return suite
+}
+
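+// buildAndStartCommand starts the compiled test binary. With pipeToStdout
+// the child's output streams to os.Stdout, with stderr also teed into the
+// returned buffer; otherwise both streams are captured in the buffer only.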
+func buildAndStartCommand(suite TestSuite, args []string, pipeToStdout bool) (*exec.Cmd, *bytes.Buffer) {
+ buf := &bytes.Buffer{}
+ cmd := exec.Command(suite.PathToCompiledTest, args...)
+ cmd.Dir = suite.Path
+ if pipeToStdout {
+ cmd.Stderr = io.MultiWriter(os.Stdout, buf)
+ cmd.Stdout = os.Stdout
+ } else {
+ cmd.Stderr = buf
+ cmd.Stdout = buf
+ }
+ err := cmd.Start()
+ command.AbortIfError("Failed to start test suite", err)
+
+ return cmd, buf
+}
+
+func checkForNoTestsWarning(buf *bytes.Buffer) bool {
+ if strings.Contains(buf.String(), "warning: no tests to run") {
+ fmt.Fprintf(os.Stderr, `Found no test suites, did you forget to run "ginkgo bootstrap"?`)
+ return true
+ }
+ return false
+}
+
+func runGoTest(suite TestSuite, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig) TestSuite {
+ // As we run the go test from the suite directory, make sure the cover profile is absolute
+ // and placed into the expected output directory when one is configured.
+ if goFlagsConfig.Cover && !filepath.IsAbs(goFlagsConfig.CoverProfile) {
+ goFlagsConfig.CoverProfile = AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, 0)
+ }
+
+ args, err := types.GenerateGoTestRunArgs(goFlagsConfig)
+ command.AbortIfError("Failed to generate test run arguments", err)
+ cmd, buf := buildAndStartCommand(suite, args, true)
+
+ cmd.Wait()
+
+ exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus()
+ passed := (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE)
+ passed = !(checkForNoTestsWarning(buf) && cliConfig.RequireSuite) && passed
+ if passed {
+ suite.State = TestSuiteStatePassed
+ } else {
+ suite.State = TestSuiteStateFailed
+ }
+
+ return suite
+}
+
+func runSerial(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig types.ReporterConfig, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig, additionalArgs []string) TestSuite {
+ if goFlagsConfig.Cover {
+ goFlagsConfig.CoverProfile = AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, 0)
+ }
+ if goFlagsConfig.BlockProfile != "" {
+ goFlagsConfig.BlockProfile = AbsPathForGeneratedAsset(goFlagsConfig.BlockProfile, suite, cliConfig, 0)
+ }
+ if goFlagsConfig.CPUProfile != "" {
+ goFlagsConfig.CPUProfile = AbsPathForGeneratedAsset(goFlagsConfig.CPUProfile, suite, cliConfig, 0)
+ }
+ if goFlagsConfig.MemProfile != "" {
+ goFlagsConfig.MemProfile = AbsPathForGeneratedAsset(goFlagsConfig.MemProfile, suite, cliConfig, 0)
+ }
+ if goFlagsConfig.MutexProfile != "" {
+ goFlagsConfig.MutexProfile = AbsPathForGeneratedAsset(goFlagsConfig.MutexProfile, suite, cliConfig, 0)
+ }
+ if reporterConfig.JSONReport != "" {
+ reporterConfig.JSONReport = AbsPathForGeneratedAsset(reporterConfig.JSONReport, suite, cliConfig, 0)
+ }
+ if reporterConfig.JUnitReport != "" {
+ reporterConfig.JUnitReport = AbsPathForGeneratedAsset(reporterConfig.JUnitReport, suite, cliConfig, 0)
+ }
+ if reporterConfig.TeamcityReport != "" {
+ reporterConfig.TeamcityReport = AbsPathForGeneratedAsset(reporterConfig.TeamcityReport, suite, cliConfig, 0)
+ }
+
+ args, err := types.GenerateGinkgoTestRunArgs(ginkgoConfig, reporterConfig, goFlagsConfig)
+ command.AbortIfError("Failed to generate test run arguments", err)
+ args = append([]string{"--test.timeout=0"}, args...)
+ args = append(args, additionalArgs...)
+
+ cmd, buf := buildAndStartCommand(suite, args, true)
+
+ cmd.Wait()
+
+ exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus()
+ suite.HasProgrammaticFocus = (exitStatus == types.GINKGO_FOCUS_EXIT_CODE)
+ passed := (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE)
+ passed = !(checkForNoTestsWarning(buf) && cliConfig.RequireSuite) && passed
+ if passed {
+ suite.State = TestSuiteStatePassed
+ } else {
+ suite.State = TestSuiteStateFailed
+ }
+
+ if suite.HasProgrammaticFocus {
+ if goFlagsConfig.Cover {
+ fmt.Fprintln(os.Stdout, "coverage: no coverfile was generated because specs are programmatically focused")
+ }
+ if goFlagsConfig.BlockProfile != "" {
+ fmt.Fprintln(os.Stdout, "no block profile was generated because specs are programmatically focused")
+ }
+ if goFlagsConfig.CPUProfile != "" {
+ fmt.Fprintln(os.Stdout, "no cpu profile was generated because specs are programmatically focused")
+ }
+ if goFlagsConfig.MemProfile != "" {
+ fmt.Fprintln(os.Stdout, "no mem profile was generated because specs are programmatically focused")
+ }
+ if goFlagsConfig.MutexProfile != "" {
+ fmt.Fprintln(os.Stdout, "no mutex profile was generated because specs are programmatically focused")
+ }
+ }
+
+ return suite
+}
+
+func runParallel(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig types.ReporterConfig, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig, additionalArgs []string) TestSuite {
+ type procResult struct {
+ passed bool
+ hasProgrammaticFocus bool
+ }
+
+ numProcs := cliConfig.ComputedProcs()
+ procOutput := make([]*bytes.Buffer, numProcs)
+ coverProfiles := []string{}
+
+ blockProfiles := []string{}
+ cpuProfiles := []string{}
+ memProfiles := []string{}
+ mutexProfiles := []string{}
+
+ procResults := make(chan procResult)
+
+ server, err := parallel_support.NewServer(numProcs, reporters.NewDefaultReporter(reporterConfig, formatter.ColorableStdOut))
+ command.AbortIfError("Failed to start parallel spec server", err)
+ server.Start()
+ defer server.Close()
+
+ if reporterConfig.JSONReport != "" {
+ reporterConfig.JSONReport = AbsPathForGeneratedAsset(reporterConfig.JSONReport, suite, cliConfig, 0)
+ }
+ if reporterConfig.JUnitReport != "" {
+ reporterConfig.JUnitReport = AbsPathForGeneratedAsset(reporterConfig.JUnitReport, suite, cliConfig, 0)
+ }
+ if reporterConfig.TeamcityReport != "" {
+ reporterConfig.TeamcityReport = AbsPathForGeneratedAsset(reporterConfig.TeamcityReport, suite, cliConfig, 0)
+ }
+
+ for proc := 1; proc <= numProcs; proc++ {
+ procGinkgoConfig := ginkgoConfig
+ procGinkgoConfig.ParallelProcess, procGinkgoConfig.ParallelTotal, procGinkgoConfig.ParallelHost = proc, numProcs, server.Address()
+
+ procGoFlagsConfig := goFlagsConfig
+ if goFlagsConfig.Cover {
+ procGoFlagsConfig.CoverProfile = AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, proc)
+ coverProfiles = append(coverProfiles, procGoFlagsConfig.CoverProfile)
+ }
+ if goFlagsConfig.BlockProfile != "" {
+ procGoFlagsConfig.BlockProfile = AbsPathForGeneratedAsset(goFlagsConfig.BlockProfile, suite, cliConfig, proc)
+ blockProfiles = append(blockProfiles, procGoFlagsConfig.BlockProfile)
+ }
+ if goFlagsConfig.CPUProfile != "" {
+ procGoFlagsConfig.CPUProfile = AbsPathForGeneratedAsset(goFlagsConfig.CPUProfile, suite, cliConfig, proc)
+ cpuProfiles = append(cpuProfiles, procGoFlagsConfig.CPUProfile)
+ }
+ if goFlagsConfig.MemProfile != "" {
+ procGoFlagsConfig.MemProfile = AbsPathForGeneratedAsset(goFlagsConfig.MemProfile, suite, cliConfig, proc)
+ memProfiles = append(memProfiles, procGoFlagsConfig.MemProfile)
+ }
+ if goFlagsConfig.MutexProfile != "" {
+ procGoFlagsConfig.MutexProfile = AbsPathForGeneratedAsset(goFlagsConfig.MutexProfile, suite, cliConfig, proc)
+ mutexProfiles = append(mutexProfiles, procGoFlagsConfig.MutexProfile)
+ }
+
+ args, err := types.GenerateGinkgoTestRunArgs(procGinkgoConfig, reporterConfig, procGoFlagsConfig)
+ command.AbortIfError("Failed to generate test run arguments", err)
+ args = append([]string{"--test.timeout=0"}, args...)
+ args = append(args, additionalArgs...)
+
+ cmd, buf := buildAndStartCommand(suite, args, false)
+ procOutput[proc-1] = buf
+ server.RegisterAlive(proc, func() bool { return cmd.ProcessState == nil || !cmd.ProcessState.Exited() })
+
+ go func() {
+ cmd.Wait()
+ exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus()
+ procResults <- procResult{
+ passed: (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE),
+ hasProgrammaticFocus: exitStatus == types.GINKGO_FOCUS_EXIT_CODE,
+ }
+ }()
+ }
+
+ passed := true
+ for proc := 1; proc <= cliConfig.ComputedProcs(); proc++ {
+ result := <-procResults
+ passed = passed && result.passed
+ suite.HasProgrammaticFocus = suite.HasProgrammaticFocus || result.hasProgrammaticFocus
+ }
+ if passed {
+ suite.State = TestSuiteStatePassed
+ } else {
+ suite.State = TestSuiteStateFailed
+ }
+
+ select {
+ case <-server.GetSuiteDone():
+ fmt.Println("")
+ case <-time.After(time.Second):
+ //one of the nodes never finished reporting to the server. Something must have gone wrong.
+ fmt.Fprint(formatter.ColorableStdErr, formatter.F("\n{{bold}}{{red}}Ginkgo timed out waiting for all parallel procs to report back{{/}}\n"))
+ fmt.Fprint(formatter.ColorableStdErr, formatter.F("{{gray}}Test suite:{{/}} %s (%s)\n\n", suite.PackageName, suite.Path))
+ fmt.Fprint(formatter.ColorableStdErr, formatter.Fiw(0, formatter.COLS, "This occurs if a parallel process exits before it reports its results to the Ginkgo CLI. The CLI will now print out all the stdout/stderr output it's collected from the running processes. However you may not see anything useful in these logs because the individual test processes usually intercept output to stdout/stderr in order to capture it in the spec reports.\n\nYou may want to try rerunning your test suite with {{light-gray}}--output-interceptor-mode=none{{/}} to see additional output here and debug your suite.\n"))
+ fmt.Fprintln(formatter.ColorableStdErr, " ")
+ for proc := 1; proc <= cliConfig.ComputedProcs(); proc++ {
+ fmt.Fprint(formatter.ColorableStdErr, formatter.F("{{bold}}Output from proc %d:{{/}}\n", proc))
+ fmt.Fprintln(os.Stderr, formatter.Fi(1, "%s", procOutput[proc-1].String()))
+ }
+ fmt.Fprintf(os.Stderr, "** End **")
+ }
+
+ for proc := 1; proc <= cliConfig.ComputedProcs(); proc++ {
+ output := procOutput[proc-1].String()
+ if proc == 1 && checkForNoTestsWarning(procOutput[0]) && cliConfig.RequireSuite {
+ suite.State = TestSuiteStateFailed
+ }
+ if strings.Contains(output, "deprecated Ginkgo functionality") {
+ fmt.Fprintln(os.Stderr, output)
+ }
+ }
+
+ if len(coverProfiles) > 0 {
+ if suite.HasProgrammaticFocus {
+ fmt.Fprintln(os.Stdout, "coverage: no coverfile was generated because specs are programmatically focused")
+ } else {
+ coverProfile := AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, 0)
+ err := MergeAndCleanupCoverProfiles(coverProfiles, coverProfile)
+ command.AbortIfError("Failed to combine cover profiles", err)
+
+ coverage, err := GetCoverageFromCoverProfile(coverProfile)
+ command.AbortIfError("Failed to compute coverage", err)
+ if coverage == 0 {
+ fmt.Fprintln(os.Stdout, "coverage: [no statements]")
+ } else {
+ fmt.Fprintf(os.Stdout, "coverage: %.1f%% of statements\n", coverage)
+ }
+ }
+ }
+ if len(blockProfiles) > 0 {
+ if suite.HasProgrammaticFocus {
+ fmt.Fprintln(os.Stdout, "no block profile was generated because specs are programmatically focused")
+ } else {
+ blockProfile := AbsPathForGeneratedAsset(goFlagsConfig.BlockProfile, suite, cliConfig, 0)
+ err := MergeProfiles(blockProfiles, blockProfile)
+ command.AbortIfError("Failed to combine blockprofiles", err)
+ }
+ }
+ if len(cpuProfiles) > 0 {
+ if suite.HasProgrammaticFocus {
+ fmt.Fprintln(os.Stdout, "no cpu profile was generated because specs are programmatically focused")
+ } else {
+ cpuProfile := AbsPathForGeneratedAsset(goFlagsConfig.CPUProfile, suite, cliConfig, 0)
+ err := MergeProfiles(cpuProfiles, cpuProfile)
+ command.AbortIfError("Failed to combine cpuprofiles", err)
+ }
+ }
+ if len(memProfiles) > 0 {
+ if suite.HasProgrammaticFocus {
+ fmt.Fprintln(os.Stdout, "no mem profile was generated because specs are programmatically focused")
+ } else {
+ memProfile := AbsPathForGeneratedAsset(goFlagsConfig.MemProfile, suite, cliConfig, 0)
+ err := MergeProfiles(memProfiles, memProfile)
+ command.AbortIfError("Failed to combine memprofiles", err)
+ }
+ }
+ if len(mutexProfiles) > 0 {
+ if suite.HasProgrammaticFocus {
+ fmt.Fprintln(os.Stdout, "no mutex profile was generated because specs are programmatically focused")
+ } else {
+ mutexProfile := AbsPathForGeneratedAsset(goFlagsConfig.MutexProfile, suite, cliConfig, 0)
+ err := MergeProfiles(mutexProfiles, mutexProfile)
+ command.AbortIfError("Failed to combine mutexprofiles", err)
+ }
+ }
+
+ return suite
+}
+
+func runAfterRunHook(command string, noColor bool, suite TestSuite) {
+ if command == "" {
+ return
+ }
+ f := formatter.NewWithNoColorBool(noColor)
+
+ // Allow for string replacement to pass input to the command
+ passed := "[FAIL]"
+ if suite.State.Is(TestSuiteStatePassed) {
+ passed = "[PASS]"
+ }
+ command = strings.ReplaceAll(command, "(ginkgo-suite-passed)", passed)
+ command = strings.ReplaceAll(command, "(ginkgo-suite-name)", suite.PackageName)
+
+ // Split the command into argv-style parts, keeping quoted segments intact
+ splitArgs := regexp.MustCompile(`'.+'|".+"|\S+`)
+ parts := splitArgs.FindAllString(command, -1)
+
+ output, err := exec.Command(parts[0], parts[1:]...).CombinedOutput()
+ if err != nil {
+ fmt.Fprintln(formatter.ColorableStdOut, f.Fi(0, "{{red}}{{bold}}After-run-hook failed:{{/}}"))
+ fmt.Fprintln(formatter.ColorableStdOut, f.Fi(1, "{{red}}%s{{/}}", output))
+ } else {
+ fmt.Fprintln(formatter.ColorableStdOut, f.Fi(0, "{{green}}{{bold}}After-run-hook succeeded:{{/}}"))
+ fmt.Fprintln(formatter.ColorableStdOut, f.Fi(1, "{{green}}%s{{/}}", output))
+ }
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go
new file mode 100644
index 000000000..df99875be
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go
@@ -0,0 +1,284 @@
+package internal
+
+import (
+ "errors"
+ "math/rand"
+ "os"
+ "path"
+ "path/filepath"
+ "regexp"
+ "runtime"
+ "strings"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+const TIMEOUT_ELAPSED_FAILURE_REASON = "Suite did not run because the timeout elapsed"
+const PRIOR_FAILURES_FAILURE_REASON = "Suite did not run because prior suites failed and --keep-going is not set"
+const EMPTY_SKIP_FAILURE_REASON = "Suite did not run: go test reported that no test files were found"
+
+type TestSuiteState uint
+
+const (
+ TestSuiteStateInvalid TestSuiteState = iota
+
+ TestSuiteStateUncompiled
+ TestSuiteStateCompiled
+
+ TestSuiteStatePassed
+
+ TestSuiteStateSkippedDueToEmptyCompilation
+ TestSuiteStateSkippedByFilter
+ TestSuiteStateSkippedDueToPriorFailures
+
+ TestSuiteStateFailed
+ TestSuiteStateFailedDueToTimeout
+ TestSuiteStateFailedToCompile
+)
+
+var TestSuiteStateFailureStates = []TestSuiteState{TestSuiteStateFailed, TestSuiteStateFailedDueToTimeout, TestSuiteStateFailedToCompile}
+
+func (state TestSuiteState) Is(states ...TestSuiteState) bool {
+ for _, suiteState := range states {
+ if suiteState == state {
+ return true
+ }
+ }
+
+ return false
+}
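+
+// Editor's note (illustrative sketch, not upstream code): Is is typically
+// used with a spread of one of the state slices, e.g.
+//
+// if suite.State.Is(TestSuiteStateFailureStates...) {
+// // the suite failed, timed out, or failed to compile
+// }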
+
+type TestSuite struct {
+ Path string
+ PackageName string
+ IsGinkgo bool
+
+ Precompiled bool
+ PathToCompiledTest string
+ CompilationError error
+
+ HasProgrammaticFocus bool
+ State TestSuiteState
+}
+
+func (ts TestSuite) AbsPath() string {
+ path, _ := filepath.Abs(ts.Path)
+ return path
+}
+
+func (ts TestSuite) NamespacedName() string {
+ name := relPath(ts.Path)
+ name = strings.TrimLeft(name, "."+string(filepath.Separator))
+ name = strings.ReplaceAll(name, string(filepath.Separator), "_")
+ name = strings.ReplaceAll(name, " ", "_")
+ if name == "" {
+ return ts.PackageName
+ }
+ return name
+}
+
+type TestSuites []TestSuite
+
+func (ts TestSuites) AnyHaveProgrammaticFocus() bool {
+ for _, suite := range ts {
+ if suite.HasProgrammaticFocus {
+ return true
+ }
+ }
+
+ return false
+}
+
+func (ts TestSuites) ThatAreGinkgoSuites() TestSuites {
+ out := TestSuites{}
+ for _, suite := range ts {
+ if suite.IsGinkgo {
+ out = append(out, suite)
+ }
+ }
+ return out
+}
+
+func (ts TestSuites) CountWithState(states ...TestSuiteState) int {
+ n := 0
+ for _, suite := range ts {
+ if suite.State.Is(states...) {
+ n += 1
+ }
+ }
+
+ return n
+}
+
+func (ts TestSuites) WithState(states ...TestSuiteState) TestSuites {
+ out := TestSuites{}
+ for _, suite := range ts {
+ if suite.State.Is(states...) {
+ out = append(out, suite)
+ }
+ }
+
+ return out
+}
+
+func (ts TestSuites) WithoutState(states ...TestSuiteState) TestSuites {
+ out := TestSuites{}
+ for _, suite := range ts {
+ if !suite.State.Is(states...) {
+ out = append(out, suite)
+ }
+ }
+
+ return out
+}
+
+func (ts TestSuites) ShuffledCopy(seed int64) TestSuites {
+ out := make(TestSuites, len(ts))
+ permutation := rand.New(rand.NewSource(seed)).Perm(len(ts))
+ for i, j := range permutation {
+ out[i] = ts[j]
+ }
+ return out
+}
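+
+// Editor's note (illustrative sketch, not upstream code): the permutation
+// depends only on the seed, so a given seed always yields the same order:
+//
+// a := suites.ShuffledCopy(42)
+// b := suites.ShuffledCopy(42)
+// // a and b now list the suites in the identical shuffled order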
+
+func FindSuites(args []string, cliConfig types.CLIConfig, allowPrecompiled bool) TestSuites {
+ suites := TestSuites{}
+
+ if len(args) > 0 {
+ for _, arg := range args {
+ if allowPrecompiled {
+ suite, err := precompiledTestSuite(arg)
+ if err == nil {
+ suites = append(suites, suite)
+ continue
+ }
+ }
+ recurseForSuite := cliConfig.Recurse
+ if strings.HasSuffix(arg, "/...") && arg != "/..." {
+ arg = arg[:len(arg)-4]
+ recurseForSuite = true
+ }
+ suites = append(suites, suitesInDir(arg, recurseForSuite)...)
+ }
+ } else {
+ suites = suitesInDir(".", cliConfig.Recurse)
+ }
+
+ if cliConfig.SkipPackage != "" {
+ skipFilters := strings.Split(cliConfig.SkipPackage, ",")
+ for idx := range suites {
+ for _, skipFilter := range skipFilters {
+ if strings.Contains(suites[idx].Path, skipFilter) {
+ suites[idx].State = TestSuiteStateSkippedByFilter
+ break
+ }
+ }
+ }
+ }
+
+ return suites
+}
+
+func precompiledTestSuite(path string) (TestSuite, error) {
+ info, err := os.Stat(path)
+ if err != nil {
+ return TestSuite{}, err
+ }
+
+ if info.IsDir() {
+ return TestSuite{}, errors.New("this is a directory, not a file")
+ }
+
+ if filepath.Ext(path) != ".test" && filepath.Ext(path) != ".exe" {
+ return TestSuite{}, errors.New("this is not a .test binary")
+ }
+
+ if filepath.Ext(path) == ".test" && runtime.GOOS != "windows" && info.Mode()&0111 == 0 {
+ return TestSuite{}, errors.New("this is not executable")
+ }
+
+ dir := relPath(filepath.Dir(path))
+ packageName := strings.TrimSuffix(filepath.Base(path), ".exe")
+ packageName = strings.TrimSuffix(packageName, ".test")
+
+ path, err = filepath.Abs(path)
+ if err != nil {
+ return TestSuite{}, err
+ }
+
+ return TestSuite{
+ Path: dir,
+ PackageName: packageName,
+ IsGinkgo: true,
+ Precompiled: true,
+ PathToCompiledTest: path,
+ State: TestSuiteStateCompiled,
+ }, nil
+}
+
+func suitesInDir(dir string, recurse bool) TestSuites {
+ suites := TestSuites{}
+
+ if path.Base(dir) == "vendor" {
+ return suites
+ }
+
+ files, _ := os.ReadDir(dir)
+ re := regexp.MustCompile(`^[^._].*_test\.go$`)
+ for _, file := range files {
+ if !file.IsDir() && re.MatchString(file.Name()) {
+ suite := TestSuite{
+ Path: relPath(dir),
+ PackageName: packageNameForSuite(dir),
+ IsGinkgo: filesHaveGinkgoSuite(dir, files),
+ State: TestSuiteStateUncompiled,
+ }
+ suites = append(suites, suite)
+ break
+ }
+ }
+
+ if recurse {
+ re = regexp.MustCompile(`^[._]`)
+ for _, file := range files {
+ if file.IsDir() && !re.MatchString(file.Name()) {
+ suites = append(suites, suitesInDir(dir+"/"+file.Name(), recurse)...)
+ }
+ }
+ }
+
+ return suites
+}
+
+func relPath(dir string) string {
+ dir, _ = filepath.Abs(dir)
+ cwd, _ := os.Getwd()
+ dir, _ = filepath.Rel(cwd, filepath.Clean(dir))
+
+ if !strings.HasPrefix(dir, ".") {
+ dir = "." + string(filepath.Separator) + dir
+ }
+
+ return dir
+}
+
+func packageNameForSuite(dir string) string {
+ path, _ := filepath.Abs(dir)
+ return filepath.Base(path)
+}
+
+func filesHaveGinkgoSuite(dir string, files []os.DirEntry) bool {
+ reTestFile := regexp.MustCompile(`_test\.go$`)
+ reGinkgo := regexp.MustCompile(`package ginkgo|\/ginkgo"|\/ginkgo\/v2"|\/ginkgo\/v2/dsl/`)
+
+ for _, file := range files {
+ if !file.IsDir() && reTestFile.MatchString(file.Name()) {
+ contents, _ := os.ReadFile(dir + "/" + file.Name())
+ if reGinkgo.Match(contents) {
+ return true
+ }
+ }
+ }
+
+ return false
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/utils.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/utils.go
new file mode 100644
index 000000000..bd9ca7d51
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/utils.go
@@ -0,0 +1,86 @@
+package internal
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "os/exec"
+
+ "github.com/onsi/ginkgo/v2/formatter"
+ "github.com/onsi/ginkgo/v2/ginkgo/command"
+)
+
+func FileExists(path string) bool {
+ _, err := os.Stat(path)
+ return err == nil
+}
+
+func CopyFile(src string, dest string) error {
+ srcFile, err := os.Open(src)
+ if err != nil {
+ return err
+ }
+ // Ensure the source file is closed on every return path.
+ defer srcFile.Close()
+
+ srcStat, err := srcFile.Stat()
+ if err != nil {
+ return err
+ }
+
+ if _, err := os.Stat(dest); err == nil {
+ os.Remove(dest)
+ }
+
+ destFile, err := os.OpenFile(dest, os.O_WRONLY|os.O_CREATE, srcStat.Mode())
+ if err != nil {
+ return err
+ }
+
+ if _, err := io.Copy(destFile, srcFile); err != nil {
+ destFile.Close()
+ return err
+ }
+
+ return destFile.Close()
+}
+
+func GoFmt(path string) {
+ out, err := exec.Command("go", "fmt", path).CombinedOutput()
+ if err != nil {
+ command.AbortIfError(fmt.Sprintf("Could not fmt:\n%s\n", string(out)), err)
+ }
+}
+
+func PluralizedWord(singular, plural string, count int) string {
+ if count == 1 {
+ return singular
+ }
+ return plural
+}
+
+func FailedSuitesReport(suites TestSuites, f formatter.Formatter) string {
+ out := ""
+ out += "There were failures detected in the following suites:\n"
+
+ maxPackageNameLength := 0
+ for _, suite := range suites.WithState(TestSuiteStateFailureStates...) {
+ if len(suite.PackageName) > maxPackageNameLength {
+ maxPackageNameLength = len(suite.PackageName)
+ }
+ }
+
+ packageNameFormatter := fmt.Sprintf("%%%ds", maxPackageNameLength)
+ for _, suite := range suites {
+ switch suite.State {
+ case TestSuiteStateFailed:
+ out += f.Fi(1, "{{red}}"+packageNameFormatter+" {{gray}}%s{{/}}\n", suite.PackageName, suite.Path)
+ case TestSuiteStateFailedToCompile:
+ out += f.Fi(1, "{{red}}"+packageNameFormatter+" {{gray}}%s {{magenta}}[Compilation failure]{{/}}\n", suite.PackageName, suite.Path)
+ case TestSuiteStateFailedDueToTimeout:
+ out += f.Fi(1, "{{red}}"+packageNameFormatter+" {{gray}}%s {{orange}}[%s]{{/}}\n", suite.PackageName, suite.Path, TIMEOUT_ELAPSED_FAILURE_REASON)
+ }
+ }
+ return out
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/verify_version.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/verify_version.go
new file mode 100644
index 000000000..9da1bab3d
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/verify_version.go
@@ -0,0 +1,54 @@
+package internal
+
+import (
+ "fmt"
+ "os/exec"
+ "regexp"
+ "strings"
+
+ "github.com/onsi/ginkgo/v2/formatter"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+var versionRe = regexp.MustCompile(`v(\d+\.\d+\.\d+)`)
+
+func VerifyCLIAndFrameworkVersion(suites TestSuites) {
+ cliVersion := types.VERSION
+ mismatches := map[string][]string{}
+
+ for _, suite := range suites {
+ cmd := exec.Command("go", "list", "-m", "github.com/onsi/ginkgo/v2")
+ cmd.Dir = suite.Path
+ output, err := cmd.CombinedOutput()
+ if err != nil {
+ continue
+ }
+ components := strings.Split(string(output), " ")
+ if len(components) != 2 {
+ continue
+ }
+ matches := versionRe.FindStringSubmatch(components[1])
+ if matches == nil || len(matches) != 2 {
+ continue
+ }
+ libraryVersion := matches[1]
+ if cliVersion != libraryVersion {
+ mismatches[libraryVersion] = append(mismatches[libraryVersion], suite.PackageName)
+ }
+ }
+
+ if len(mismatches) == 0 {
+ return
+ }
+
+ fmt.Println(formatter.F("{{red}}{{bold}}Ginkgo detected a version mismatch between the Ginkgo CLI and the version of Ginkgo imported by your packages:{{/}}"))
+
+ fmt.Println(formatter.Fi(1, "Ginkgo CLI Version:"))
+ fmt.Println(formatter.Fi(2, "{{bold}}%s{{/}}", cliVersion))
+ fmt.Println(formatter.Fi(1, "Mismatched package versions found:"))
+ for version, packages := range mismatches {
+ fmt.Println(formatter.Fi(2, "{{bold}}%s{{/}} used by %s", version, strings.Join(packages, ", ")))
+ }
+ fmt.Println("")
+ fmt.Println(formatter.Fiw(1, formatter.COLS, "{{gray}}Ginkgo will continue to attempt to run but you may see errors (including flag parsing errors) and should either update your go.mod or your version of the Ginkgo CLI to match.\n\nTo install the matching version of the CLI run\n {{bold}}go install github.com/onsi/ginkgo/v2/ginkgo{{/}}{{gray}}\nfrom a path that contains a go.mod file. Alternatively you can use\n {{bold}}go run github.com/onsi/ginkgo/v2/ginkgo{{/}}{{gray}}\nfrom a path that contains a go.mod file to invoke the matching version of the Ginkgo CLI.\n\nIf you are attempting to test multiple packages that each have a different version of the Ginkgo library with a single Ginkgo CLI, that is currently unsupported.\n{{/}}"))
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/labels/labels_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/labels/labels_command.go
new file mode 100644
index 000000000..6c61f09d1
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/labels/labels_command.go
@@ -0,0 +1,123 @@
+package labels
+
+import (
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/onsi/ginkgo/v2/ginkgo/command"
+ "github.com/onsi/ginkgo/v2/ginkgo/internal"
+ "github.com/onsi/ginkgo/v2/types"
+ "golang.org/x/tools/go/ast/inspector"
+)
+
+func BuildLabelsCommand() command.Command {
+ var cliConfig = types.NewDefaultCLIConfig()
+
+ flags, err := types.BuildLabelsCommandFlagSet(&cliConfig)
+ if err != nil {
+ panic(err)
+ }
+
+ return command.Command{
+ Name: "labels",
+ Usage: "ginkgo labels ",
+ Flags: flags,
+ ShortDoc: "List labels detected in the passed-in packages (or the package in the current directory if left blank).",
+ DocLink: "spec-labels",
+ Command: func(args []string, _ []string) {
+ ListLabels(args, cliConfig)
+ },
+ }
+}
+
+func ListLabels(args []string, cliConfig types.CLIConfig) {
+ suites := internal.FindSuites(args, cliConfig, false).WithoutState(internal.TestSuiteStateSkippedByFilter)
+ if len(suites) == 0 {
+ command.AbortWith("Found no test suites")
+ }
+ for _, suite := range suites {
+ labels := fetchLabelsFromPackage(suite.Path)
+ if len(labels) == 0 {
+ fmt.Printf("%s: No labels found\n", suite.PackageName)
+ } else {
+ fmt.Printf("%s: [%s]\n", suite.PackageName, strings.Join(labels, ", "))
+ }
+ }
+}
+
+func fetchLabelsFromPackage(packagePath string) []string {
+ fset := token.NewFileSet()
+ parsedPackages, err := parser.ParseDir(fset, packagePath, nil, 0)
+ command.AbortIfError("Failed to parse package source:", err)
+
+ files := []*ast.File{}
+ hasTestPackage := false
+ for key, pkg := range parsedPackages {
+ if strings.HasSuffix(key, "_test") {
+ hasTestPackage = true
+ for _, file := range pkg.Files {
+ files = append(files, file)
+ }
+ }
+ }
+ if !hasTestPackage {
+ for _, pkg := range parsedPackages {
+ for _, file := range pkg.Files {
+ files = append(files, file)
+ }
+ }
+ }
+
+ seen := map[string]bool{}
+ labels := []string{}
+ ispr := inspector.New(files)
+ ispr.Preorder([]ast.Node{&ast.CallExpr{}}, func(n ast.Node) {
+ potentialLabels := fetchLabels(n.(*ast.CallExpr))
+ for _, label := range potentialLabels {
+ if !seen[label] {
+ seen[label] = true
+ labels = append(labels, strconv.Quote(label))
+ }
+ }
+ })
+
+ sort.Strings(labels)
+ return labels
+}
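+
+// Editor's note (illustrative sketch, not upstream code): a spec such as
+//
+// It("works", Label("fast", "network"), func() {})
+//
+// contributes the quoted labels "fast" and "network" to the sorted,
+// de-duplicated result returned above.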
+
+func fetchLabels(callExpr *ast.CallExpr) []string {
+ out := []string{}
+ switch expr := callExpr.Fun.(type) {
+ case *ast.Ident:
+ if expr.Name != "Label" {
+ return out
+ }
+ case *ast.SelectorExpr:
+ if expr.Sel.Name != "Label" {
+ return out
+ }
+ default:
+ return out
+ }
+ for _, arg := range callExpr.Args {
+ switch expr := arg.(type) {
+ case *ast.BasicLit:
+ if expr.Kind == token.STRING {
+ unquoted, err := strconv.Unquote(expr.Value)
+ if err != nil {
+ unquoted = expr.Value
+ }
+ validated, err := types.ValidateAndCleanupLabel(unquoted, types.CodeLocation{})
+ if err == nil {
+ out = append(out, validated)
+ }
+ }
+ }
+ }
+ return out
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go
new file mode 100644
index 000000000..e9abb27d8
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go
@@ -0,0 +1,58 @@
+package main
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/onsi/ginkgo/v2/ginkgo/build"
+ "github.com/onsi/ginkgo/v2/ginkgo/command"
+ "github.com/onsi/ginkgo/v2/ginkgo/generators"
+ "github.com/onsi/ginkgo/v2/ginkgo/labels"
+ "github.com/onsi/ginkgo/v2/ginkgo/outline"
+ "github.com/onsi/ginkgo/v2/ginkgo/run"
+ "github.com/onsi/ginkgo/v2/ginkgo/unfocus"
+ "github.com/onsi/ginkgo/v2/ginkgo/watch"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+var program command.Program
+
+func GenerateCommands() []command.Command {
+ return []command.Command{
+ watch.BuildWatchCommand(),
+ build.BuildBuildCommand(),
+ generators.BuildBootstrapCommand(),
+ generators.BuildGenerateCommand(),
+ labels.BuildLabelsCommand(),
+ outline.BuildOutlineCommand(),
+ unfocus.BuildUnfocusCommand(),
+ BuildVersionCommand(),
+ }
+}
+
+func main() {
+ program = command.Program{
+ Name: "ginkgo",
+ Heading: fmt.Sprintf("Ginkgo Version %s", types.VERSION),
+ Commands: GenerateCommands(),
+ DefaultCommand: run.BuildRunCommand(),
+ DeprecatedCommands: []command.DeprecatedCommand{
+ {Name: "convert", Deprecation: types.Deprecations.Convert()},
+ {Name: "blur", Deprecation: types.Deprecations.Blur()},
+ {Name: "nodot", Deprecation: types.Deprecations.Nodot()},
+ },
+ }
+
+ program.RunAndExit(os.Args)
+}
+
+func BuildVersionCommand() command.Command {
+ return command.Command{
+ Name: "version",
+ Usage: "ginkgo version",
+ ShortDoc: "Print Ginkgo's version",
+ Command: func(_ []string, _ []string) {
+ fmt.Printf("Ginkgo Version %s\n", types.VERSION)
+ },
+ }
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go
new file mode 100644
index 000000000..5d8d00bb1
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go
@@ -0,0 +1,301 @@
+package outline
+
+import (
+ "go/ast"
+ "go/token"
+ "strconv"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+const (
+ // undefinedTextAlt is used if the spec/container text cannot be derived
+ undefinedTextAlt = "undefined"
+)
+
+// ginkgoMetadata holds useful bits of information for every entry in the outline
+type ginkgoMetadata struct {
+ // Name is the spec or container function name, e.g. `Describe` or `It`
+ Name string `json:"name"`
+
+ // Text is the `text` argument passed to specs, and some containers
+ Text string `json:"text"`
+
+ // Start is the position of first character of the spec or container block
+ Start int `json:"start"`
+
+ // End is the position of first character immediately after the spec or container block
+ End int `json:"end"`
+
+ Spec bool `json:"spec"`
+ Focused bool `json:"focused"`
+ Pending bool `json:"pending"`
+ Labels []string `json:"labels"`
+}
+
+// ginkgoNode is used to construct the outline as a tree
+type ginkgoNode struct {
+ ginkgoMetadata
+ Nodes []*ginkgoNode `json:"nodes"`
+}
+
+type walkFunc func(n *ginkgoNode)
+
+func (n *ginkgoNode) PreOrder(f walkFunc) {
+ f(n)
+ for _, m := range n.Nodes {
+ m.PreOrder(f)
+ }
+}
+
+func (n *ginkgoNode) PostOrder(f walkFunc) {
+ for _, m := range n.Nodes {
+ m.PostOrder(f)
+ }
+ f(n)
+}
+
+func (n *ginkgoNode) Walk(pre, post walkFunc) {
+ pre(n)
+ for _, m := range n.Nodes {
+ m.Walk(pre, post)
+ }
+ post(n)
+}
+
+// PropagateInheritedProperties propagates the Pending and Focused properties
+// through the subtree rooted at n.
+func (n *ginkgoNode) PropagateInheritedProperties() {
+ n.PreOrder(func(thisNode *ginkgoNode) {
+ for _, descendantNode := range thisNode.Nodes {
+ if thisNode.Pending {
+ descendantNode.Pending = true
+ descendantNode.Focused = false
+ }
+ if thisNode.Focused && !descendantNode.Pending {
+ descendantNode.Focused = true
+ }
+ }
+ })
+}
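+
+// Editor's note (illustrative sketch, not upstream code): given a pending
+// container with a focused child,
+//
+// PDescribe("outer", func() { // Pending
+// FIt("inner", func() {}) // Focused
+// })
+//
+// propagation marks "inner" as Pending and clears its Focused flag.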
+
+// BackpropagateUnfocus propagates the Focused property through the subtree
+// rooted at n. It applies the rule described in the Ginkgo docs:
+// > Nested programmatically focused specs follow a simple rule: if a
+// > leaf-node is marked focused, any of its ancestor nodes that are marked
+// > focused will be unfocused.
+func (n *ginkgoNode) BackpropagateUnfocus() {
+ focusedSpecInSubtreeStack := []bool{}
+ n.PostOrder(func(thisNode *ginkgoNode) {
+ if thisNode.Spec {
+ focusedSpecInSubtreeStack = append(focusedSpecInSubtreeStack, thisNode.Focused)
+ return
+ }
+ focusedSpecInSubtree := false
+ for range thisNode.Nodes {
+ focusedSpecInSubtree = focusedSpecInSubtree || focusedSpecInSubtreeStack[len(focusedSpecInSubtreeStack)-1]
+ focusedSpecInSubtreeStack = focusedSpecInSubtreeStack[0 : len(focusedSpecInSubtreeStack)-1]
+ }
+ focusedSpecInSubtreeStack = append(focusedSpecInSubtreeStack, focusedSpecInSubtree)
+ if focusedSpecInSubtree {
+ thisNode.Focused = false
+ }
+ })
+}
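+
+// Editor's note (illustrative sketch, not upstream code): given
+//
+// FDescribe("outer", func() { // Focused container
+// FIt("inner", func() {}) // Focused leaf
+// })
+//
+// back-propagation clears Focused on "outer" because a focused spec already
+// exists in its subtree; only the leaf-level focus is kept.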
+
+func packageAndIdentNamesFromCallExpr(ce *ast.CallExpr) (string, string, bool) {
+ switch ex := ce.Fun.(type) {
+ case *ast.Ident:
+ return "", ex.Name, true
+ case *ast.SelectorExpr:
+ pkgID, ok := ex.X.(*ast.Ident)
+ if !ok {
+ return "", "", false
+ }
+ // A package identifier is top-level, so Obj must be nil
+ if pkgID.Obj != nil {
+ return "", "", false
+ }
+ if ex.Sel == nil {
+ return "", "", false
+ }
+ return pkgID.Name, ex.Sel.Name, true
+ default:
+ return "", "", false
+ }
+}
+
+// absoluteOffsetsForNode derives the absolute character offsets of the node start and
+// end positions.
+func absoluteOffsetsForNode(fset *token.FileSet, n ast.Node) (start, end int) {
+ return fset.PositionFor(n.Pos(), false).Offset, fset.PositionFor(n.End(), false).Offset
+}
+
+// ginkgoNodeFromCallExpr derives an outline entry from a go AST subtree
+// corresponding to a Ginkgo container or spec.
+func ginkgoNodeFromCallExpr(fset *token.FileSet, ce *ast.CallExpr, ginkgoPackageName *string) (*ginkgoNode, bool) {
+ packageName, identName, ok := packageAndIdentNamesFromCallExpr(ce)
+ if !ok {
+ return nil, false
+ }
+
+ n := ginkgoNode{}
+ n.Name = identName
+ n.Start, n.End = absoluteOffsetsForNode(fset, ce)
+ n.Nodes = make([]*ginkgoNode, 0)
+ switch identName {
+ case "It", "Specify", "Entry":
+ n.Spec = true
+ n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
+ n.Labels = labelFromCallExpr(ce)
+ n.Pending = pendingFromCallExpr(ce)
+ return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
+ case "FIt", "FSpecify", "FEntry":
+ n.Spec = true
+ n.Focused = true
+ n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
+ n.Labels = labelFromCallExpr(ce)
+ return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
+ case "PIt", "PSpecify", "XIt", "XSpecify", "PEntry", "XEntry":
+ n.Spec = true
+ n.Pending = true
+ n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
+ n.Labels = labelFromCallExpr(ce)
+ return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
+ case "Context", "Describe", "When", "DescribeTable":
+ n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
+ n.Labels = labelFromCallExpr(ce)
+ n.Pending = pendingFromCallExpr(ce)
+ return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
+ case "FContext", "FDescribe", "FWhen", "FDescribeTable":
+ n.Focused = true
+ n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
+ n.Labels = labelFromCallExpr(ce)
+ return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
+ case "PContext", "PDescribe", "PWhen", "XContext", "XDescribe", "XWhen", "PDescribeTable", "XDescribeTable":
+ n.Pending = true
+ n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
+ n.Labels = labelFromCallExpr(ce)
+ return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
+ case "By":
+ n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
+ return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
+ case "AfterEach", "BeforeEach":
+ return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
+ case "JustAfterEach", "JustBeforeEach":
+ return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
+ case "AfterSuite", "BeforeSuite":
+ return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
+ case "SynchronizedAfterSuite", "SynchronizedBeforeSuite":
+ return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
+ default:
+ return nil, false
+ }
+}
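+
+// Editor's note (illustrative sketch, not upstream code): per the mapping
+// above, a call such as FIt("sends traffic", func() {}) yields a node with
+// Spec=true and Focused=true, while PDescribe("slow path", func() {}) yields
+// a container node with Pending=true.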
+
+// textOrAltFromCallExpr tries to derive the "text" of a Ginkgo spec or
+// container. If it cannot derive it, it returns the alt text.
+func textOrAltFromCallExpr(ce *ast.CallExpr, alt string) string {
+ text, defined := textFromCallExpr(ce)
+ if !defined {
+ return alt
+ }
+ return text
+}
+
+// textFromCallExpr tries to derive the "text" of a Ginkgo spec or container. If
+// it cannot derive it, it returns false.
+func textFromCallExpr(ce *ast.CallExpr) (string, bool) {
+ if len(ce.Args) < 1 {
+ return "", false
+ }
+ text, ok := ce.Args[0].(*ast.BasicLit)
+ if !ok {
+ return "", false
+ }
+ switch text.Kind {
+ case token.CHAR, token.STRING:
+ // For token.CHAR and token.STRING, Value is quoted
+ unquoted, err := strconv.Unquote(text.Value)
+ if err != nil {
+ // If unquoting fails, just use the raw Value
+ return text.Value, true
+ }
+ return unquoted, true
+ default:
+ return text.Value, true
+ }
+}
+
+func labelFromCallExpr(ce *ast.CallExpr) []string {
+ labels := []string{}
+ if len(ce.Args) < 2 {
+ return labels
+ }
+
+ for _, arg := range ce.Args[1:] {
+ switch expr := arg.(type) {
+ case *ast.CallExpr:
+ id, ok := expr.Fun.(*ast.Ident)
+ if !ok {
+ // to skip over cases where the expr.Fun. is actually *ast.SelectorExpr
+ continue
+ }
+ if id.Name == "Label" {
+ ls := extractLabels(expr)
+ labels = append(labels, ls...)
+ }
+ }
+ }
+ return labels
+}
+
+func extractLabels(expr *ast.CallExpr) []string {
+ out := []string{}
+ for _, arg := range expr.Args {
+ switch expr := arg.(type) {
+ case *ast.BasicLit:
+ if expr.Kind == token.STRING {
+ unquoted, err := strconv.Unquote(expr.Value)
+ if err != nil {
+ unquoted = expr.Value
+ }
+ validated, err := types.ValidateAndCleanupLabel(unquoted, types.CodeLocation{})
+ if err == nil {
+ out = append(out, validated)
+ }
+ }
+ }
+ }
+
+ return out
+}
+
+func pendingFromCallExpr(ce *ast.CallExpr) bool {
+ pending := false
+ if len(ce.Args) < 2 {
+ return pending
+ }
+
+ for _, arg := range ce.Args[1:] {
+ switch expr := arg.(type) {
+ case *ast.CallExpr:
+ id, ok := expr.Fun.(*ast.Ident)
+ if !ok {
+ // to skip over cases where the expr.Fun. is actually *ast.SelectorExpr
+ continue
+ }
+ if id.Name == "Pending" {
+ pending = true
+ }
+ case *ast.Ident:
+ if expr.Name == "Pending" {
+ pending = true
+ }
+ }
+ }
+ return pending
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go
new file mode 100644
index 000000000..f0a6b5d26
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go
@@ -0,0 +1,58 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Most of the required functions were available in the
+// "golang.org/x/tools/go/ast/astutil" package, but not exported.
+// They were copied from https://github.com/golang/tools/blob/2b0845dc783e36ae26d683f4915a5840ef01ab0f/go/ast/astutil/imports.go
+
+package outline
+
+import (
+ "go/ast"
+ "strconv"
+ "strings"
+)
+
+// packageNameForImport returns the package name for the package. If the package
+// is not imported, it returns nil. "Package name" refers to `pkgname` in the
+// call expression `pkgname.ExportedIdentifier`. Examples:
+// (import path not found) -> nil
+// "import example.com/pkg/foo" -> "foo"
+// "import fooalias example.com/pkg/foo" -> "fooalias"
+// "import . example.com/pkg/foo" -> ""
+func packageNameForImport(f *ast.File, path string) *string {
+ spec := importSpec(f, path)
+ if spec == nil {
+ return nil
+ }
+ name := spec.Name.String()
+ // spec.Name is nil when the import has no alias; (*ast.Ident)(nil).String()
+ // returns "<nil>", in which case we fall back to the default package name.
+ if name == "<nil>" {
+ name = "ginkgo"
+ }
+ if name == "." {
+ name = ""
+ }
+ return &name
+}
+
+// importSpec returns the import spec if f imports path,
+// or nil otherwise.
+func importSpec(f *ast.File, path string) *ast.ImportSpec {
+ for _, s := range f.Imports {
+ if strings.HasPrefix(importPath(s), path) {
+ return s
+ }
+ }
+ return nil
+}
+
+// importPath returns the unquoted import path of s,
+// or "" if the path is not properly quoted.
+func importPath(s *ast.ImportSpec) string {
+ t, err := strconv.Unquote(s.Path.Value)
+ if err != nil {
+ return ""
+ }
+ return t
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go
new file mode 100644
index 000000000..c2327cda8
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go
@@ -0,0 +1,110 @@
+package outline
+
+import (
+ "encoding/json"
+ "fmt"
+ "go/ast"
+ "go/token"
+ "strings"
+
+ "golang.org/x/tools/go/ast/inspector"
+)
+
+const (
+ // ginkgoImportPath is the well-known ginkgo import path
+ ginkgoImportPath = "github.com/onsi/ginkgo/v2"
+)
+
+// FromASTFile returns an outline for a Ginkgo test source file
+func FromASTFile(fset *token.FileSet, src *ast.File) (*outline, error) {
+ ginkgoPackageName := packageNameForImport(src, ginkgoImportPath)
+ if ginkgoPackageName == nil {
+ return nil, fmt.Errorf("file does not import %q", ginkgoImportPath)
+ }
+
+ root := ginkgoNode{}
+ stack := []*ginkgoNode{&root}
+ ispr := inspector.New([]*ast.File{src})
+ ispr.Nodes([]ast.Node{(*ast.CallExpr)(nil)}, func(node ast.Node, push bool) bool {
+ if push {
+ // Pre-order traversal
+ ce, ok := node.(*ast.CallExpr)
+ if !ok {
+ // Because `Nodes` calls this function only when the node is an
+ // ast.CallExpr, this should never happen
+ panic(fmt.Errorf("node starting at %d, ending at %d is not an *ast.CallExpr", node.Pos(), node.End()))
+ }
+ gn, ok := ginkgoNodeFromCallExpr(fset, ce, ginkgoPackageName)
+ if !ok {
+ // Node is not a Ginkgo spec or container, continue
+ return true
+ }
+ parent := stack[len(stack)-1]
+ parent.Nodes = append(parent.Nodes, gn)
+ stack = append(stack, gn)
+ return true
+ }
+ // Post-order traversal
+ start, end := absoluteOffsetsForNode(fset, node)
+ lastVisitedGinkgoNode := stack[len(stack)-1]
+ if start != lastVisitedGinkgoNode.Start || end != lastVisitedGinkgoNode.End {
+ // Node is not a Ginkgo spec or container, so it was not pushed onto the stack, continue
+ return true
+ }
+ stack = stack[0 : len(stack)-1]
+ return true
+ })
+ if len(root.Nodes) == 0 {
+ return &outline{[]*ginkgoNode{}}, nil
+ }
+
+ // Derive the final focused property for all nodes. This must be done
+ // _before_ propagating the inherited focused property.
+ root.BackpropagateUnfocus()
+ // Now, propagate inherited properties, including focused and pending.
+ root.PropagateInheritedProperties()
+
+ return &outline{root.Nodes}, nil
+}
+
+type outline struct {
+ Nodes []*ginkgoNode `json:"nodes"`
+}
+
+func (o *outline) MarshalJSON() ([]byte, error) {
+ return json.Marshal(o.Nodes)
+}
+
+// String returns a CSV-formatted outline. Spec or container are output in
+// depth-first order.
+func (o *outline) String() string {
+ return o.StringIndent(0)
+}
+
+// StringIndent returns a CSV-formatted outline, but every line is indented by
+// one 'width' of spaces for every level of nesting.
+func (o *outline) StringIndent(width int) string {
+ var b strings.Builder
+ b.WriteString("Name,Text,Start,End,Spec,Focused,Pending,Labels\n")
+
+ currentIndent := 0
+ pre := func(n *ginkgoNode) {
+ b.WriteString(fmt.Sprintf("%*s", currentIndent, ""))
+ var labels string
+ if len(n.Labels) == 1 {
+ labels = n.Labels[0]
+ } else {
+ labels = strings.Join(n.Labels, ", ")
+ }
+ // enclose labels in a double-quoted, comma-separated list so that when imported into a CSV app the Labels column holds comma-separated strings
+ b.WriteString(fmt.Sprintf("%s,%s,%d,%d,%t,%t,%t,\"%s\"\n", n.Name, n.Text, n.Start, n.End, n.Spec, n.Focused, n.Pending, labels))
+ currentIndent += width
+ }
+ post := func(n *ginkgoNode) {
+ currentIndent -= width
+ }
+ for _, n := range o.Nodes {
+ n.Walk(pre, post)
+ }
+ return b.String()
+}
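+
+// Editor's note (illustrative sketch, not upstream code): for one Describe
+// holding a single It, StringIndent(2) emits CSV along the lines of
+//
+// Name,Text,Start,End,Spec,Focused,Pending,Labels
+// Describe,outer,10,120,false,false,false,""
+//   It,inner,40,100,true,false,false,""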
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline_command.go
new file mode 100644
index 000000000..36698d46a
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline_command.go
@@ -0,0 +1,98 @@
+package outline
+
+import (
+ "encoding/json"
+ "fmt"
+ "go/parser"
+ "go/token"
+ "os"
+
+ "github.com/onsi/ginkgo/v2/ginkgo/command"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+const (
+ // indentWidth is the width used by the 'indent' output
+ indentWidth = 4
+ // stdinAlias is a portable alias for stdin. This convention is used in
+ // other CLIs, e.g., kubectl.
+ stdinAlias = "-"
+ usageCommand = "ginkgo outline <filename>"
+)
+
+type outlineConfig struct {
+ Format string
+}
+
+func BuildOutlineCommand() command.Command {
+ conf := outlineConfig{
+ Format: "csv",
+ }
+ flags, err := types.NewGinkgoFlagSet(
+ types.GinkgoFlags{
+ {Name: "format", KeyPath: "Format",
+ Usage: "Format of outline",
+ UsageArgument: "one of 'csv', 'indent', or 'json'",
+ UsageDefaultValue: conf.Format,
+ },
+ },
+ &conf,
+ types.GinkgoFlagSections{},
+ )
+ if err != nil {
+ panic(err)
+ }
+
+ return command.Command{
+ Name: "outline",
+ Usage: "ginkgo outline ",
+ ShortDoc: "Create an outline of Ginkgo symbols for a file",
+ Documentation: "To read from stdin, use: `ginkgo outline -`",
+ DocLink: "creating-an-outline-of-specs",
+ Flags: flags,
+ Command: func(args []string, _ []string) {
+ outlineFile(args, conf.Format)
+ },
+ }
+}
+
+func outlineFile(args []string, format string) {
+ if len(args) != 1 {
+ command.AbortWithUsage("outline expects exactly one argument")
+ }
+
+ filename := args[0]
+ var src *os.File
+ if filename == stdinAlias {
+ src = os.Stdin
+ } else {
+ var err error
+ src, err = os.Open(filename)
+ command.AbortIfError("Failed to open file:", err)
+ }
+
+ fset := token.NewFileSet()
+
+ parsedSrc, err := parser.ParseFile(fset, filename, src, 0)
+ command.AbortIfError("Failed to parse source:", err)
+
+ o, err := FromASTFile(fset, parsedSrc)
+ command.AbortIfError("Failed to create outline:", err)
+
+ var oerr error
+ switch format {
+ case "csv":
+ _, oerr = fmt.Print(o)
+ case "indent":
+ _, oerr = fmt.Print(o.StringIndent(indentWidth))
+ case "json":
+ b, err := json.Marshal(o)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "error marshalling to json: %s\n", err)
+ }
+ _, oerr = fmt.Println(string(b))
+ default:
+ command.AbortWith("Format %s not accepted", format)
+ }
+ command.AbortIfError("Failed to write outline:", oerr)
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go
new file mode 100644
index 000000000..aaed4d570
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go
@@ -0,0 +1,232 @@
+package run
+
+import (
+ "fmt"
+ "os"
+ "strings"
+ "time"
+
+ "github.com/onsi/ginkgo/v2/formatter"
+ "github.com/onsi/ginkgo/v2/ginkgo/command"
+ "github.com/onsi/ginkgo/v2/ginkgo/internal"
+ "github.com/onsi/ginkgo/v2/internal/interrupt_handler"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+func BuildRunCommand() command.Command {
+ var suiteConfig = types.NewDefaultSuiteConfig()
+ var reporterConfig = types.NewDefaultReporterConfig()
+ var cliConfig = types.NewDefaultCLIConfig()
+ var goFlagsConfig = types.NewDefaultGoFlagsConfig()
+
+ flags, err := types.BuildRunCommandFlagSet(&suiteConfig, &reporterConfig, &cliConfig, &goFlagsConfig)
+ if err != nil {
+ panic(err)
+ }
+
+ interruptHandler := interrupt_handler.NewInterruptHandler(nil)
+ interrupt_handler.SwallowSigQuit()
+
+ return command.Command{
+ Name: "run",
+ Flags: flags,
+ Usage: "ginkgo run -- ",
+ ShortDoc: "Run the tests in the passed in (or the package in the current directory if left blank)",
+ Documentation: "Any arguments after -- will be passed to the test.",
+ DocLink: "running-tests",
+ Command: func(args []string, additionalArgs []string) {
+ var errors []error
+ cliConfig, goFlagsConfig, errors = types.VetAndInitializeCLIAndGoConfig(cliConfig, goFlagsConfig)
+ command.AbortIfErrors("Ginkgo detected configuration issues:", errors)
+
+ runner := &SpecRunner{
+ cliConfig: cliConfig,
+ goFlagsConfig: goFlagsConfig,
+ suiteConfig: suiteConfig,
+ reporterConfig: reporterConfig,
+ flags: flags,
+
+ interruptHandler: interruptHandler,
+ }
+
+ runner.RunSpecs(args, additionalArgs)
+ },
+ }
+}
+
+type SpecRunner struct {
+ suiteConfig types.SuiteConfig
+ reporterConfig types.ReporterConfig
+ cliConfig types.CLIConfig
+ goFlagsConfig types.GoFlagsConfig
+ flags types.GinkgoFlagSet
+
+ interruptHandler *interrupt_handler.InterruptHandler
+}
+
+func (r *SpecRunner) RunSpecs(args []string, additionalArgs []string) {
+ suites := internal.FindSuites(args, r.cliConfig, true)
+ skippedSuites := suites.WithState(internal.TestSuiteStateSkippedByFilter)
+ suites = suites.WithoutState(internal.TestSuiteStateSkippedByFilter)
+
+ internal.VerifyCLIAndFrameworkVersion(suites)
+
+ if len(skippedSuites) > 0 {
+ fmt.Println("Will skip:")
+ for _, skippedSuite := range skippedSuites {
+ fmt.Println(" " + skippedSuite.Path)
+ }
+ }
+
+ if len(skippedSuites) > 0 && len(suites) == 0 {
+ command.AbortGracefullyWith("All tests skipped! Exiting...")
+ }
+
+ if len(suites) == 0 {
+ command.AbortWith("Found no test suites")
+ }
+
+ if len(suites) > 1 && !r.flags.WasSet("succinct") && r.reporterConfig.Verbosity().LT(types.VerbosityLevelVerbose) {
+ r.reporterConfig.Succinct = true
+ }
+
+ t := time.Now()
+ var endTime time.Time
+ if r.suiteConfig.Timeout > 0 {
+ endTime = t.Add(r.suiteConfig.Timeout)
+ }
+
+ iteration := 0
+OUTER_LOOP:
+ for {
+ if !r.flags.WasSet("seed") {
+ r.suiteConfig.RandomSeed = time.Now().Unix()
+ }
+ if r.cliConfig.RandomizeSuites && len(suites) > 1 {
+ suites = suites.ShuffledCopy(r.suiteConfig.RandomSeed)
+ }
+
+ opc := internal.NewOrderedParallelCompiler(r.cliConfig.ComputedNumCompilers())
+ opc.StartCompiling(suites, r.goFlagsConfig)
+
+ SUITE_LOOP:
+ for {
+ suiteIdx, suite := opc.Next()
+ if suiteIdx >= len(suites) {
+ break SUITE_LOOP
+ }
+ suites[suiteIdx] = suite
+
+ if r.interruptHandler.Status().Interrupted() {
+ opc.StopAndDrain()
+ break OUTER_LOOP
+ }
+
+ if suites[suiteIdx].State.Is(internal.TestSuiteStateSkippedDueToEmptyCompilation) {
+ fmt.Printf("Skipping %s (no test files)\n", suite.Path)
+ continue SUITE_LOOP
+ }
+
+ if suites[suiteIdx].State.Is(internal.TestSuiteStateFailedToCompile) {
+ fmt.Println(suites[suiteIdx].CompilationError.Error())
+ if !r.cliConfig.KeepGoing {
+ opc.StopAndDrain()
+ }
+ continue SUITE_LOOP
+ }
+
+ if suites.CountWithState(internal.TestSuiteStateFailureStates...) > 0 && !r.cliConfig.KeepGoing {
+ suites[suiteIdx].State = internal.TestSuiteStateSkippedDueToPriorFailures
+ opc.StopAndDrain()
+ continue SUITE_LOOP
+ }
+
+ if !endTime.IsZero() {
+ r.suiteConfig.Timeout = time.Until(endTime)
+ if r.suiteConfig.Timeout <= 0 {
+ suites[suiteIdx].State = internal.TestSuiteStateFailedDueToTimeout
+ opc.StopAndDrain()
+ continue SUITE_LOOP
+ }
+ }
+
+ suites[suiteIdx] = internal.RunCompiledSuite(suites[suiteIdx], r.suiteConfig, r.reporterConfig, r.cliConfig, r.goFlagsConfig, additionalArgs)
+ }
+
+ if suites.CountWithState(internal.TestSuiteStateFailureStates...) > 0 {
+ if iteration > 0 {
+ fmt.Printf("\nTests failed on attempt #%d\n\n", iteration+1)
+ }
+ break OUTER_LOOP
+ }
+
+ if r.cliConfig.UntilItFails {
+ fmt.Printf("\nAll tests passed...\nWill keep running them until they fail.\nThis was attempt #%d\n%s\n", iteration+1, orcMessage(iteration+1))
+ } else if r.cliConfig.Repeat > 0 && iteration < r.cliConfig.Repeat {
+ fmt.Printf("\nAll tests passed...\nThis was attempt %d of %d.\n", iteration+1, r.cliConfig.Repeat+1)
+ } else {
+ break OUTER_LOOP
+ }
+ iteration += 1
+ }
+
+ internal.Cleanup(r.goFlagsConfig, suites...)
+
+ messages, err := internal.FinalizeProfilesAndReportsForSuites(suites, r.cliConfig, r.suiteConfig, r.reporterConfig, r.goFlagsConfig)
+ command.AbortIfError("could not finalize profiles:", err)
+ for _, message := range messages {
+ fmt.Println(message)
+ }
+
+ fmt.Printf("\nGinkgo ran %d %s in %s\n", len(suites), internal.PluralizedWord("suite", "suites", len(suites)), time.Since(t))
+
+ if suites.CountWithState(internal.TestSuiteStateFailureStates...) == 0 {
+ if suites.AnyHaveProgrammaticFocus() && strings.TrimSpace(os.Getenv("GINKGO_EDITOR_INTEGRATION")) == "" {
+ fmt.Printf("Test Suite Passed\n")
+ fmt.Printf("Detected Programmatic Focus - setting exit status to %d\n", types.GINKGO_FOCUS_EXIT_CODE)
+ command.Abort(command.AbortDetails{ExitCode: types.GINKGO_FOCUS_EXIT_CODE})
+ } else {
+ fmt.Printf("Test Suite Passed\n")
+ command.Abort(command.AbortDetails{})
+ }
+ } else {
+ fmt.Fprintln(formatter.ColorableStdOut, "")
+ if len(suites) > 1 && suites.CountWithState(internal.TestSuiteStateFailureStates...) > 0 {
+ fmt.Fprintln(formatter.ColorableStdOut,
+ internal.FailedSuitesReport(suites, formatter.NewWithNoColorBool(r.reporterConfig.NoColor)))
+ }
+ fmt.Printf("Test Suite Failed\n")
+ command.Abort(command.AbortDetails{ExitCode: 1})
+ }
+}
+
+func orcMessage(iteration int) string {
+ if iteration < 10 {
+ return ""
+ } else if iteration < 30 {
+ return []string{
+ "If at first you succeed...",
+ "...try, try again.",
+ "Looking good!",
+ "Still good...",
+ "I think your tests are fine....",
+ "Yep, still passing",
+ "Oh boy, here I go testin' again!",
+ "Even the gophers are getting bored",
+ "Did you try -race?",
+ "Maybe you should stop now?",
+ "I'm getting tired...",
+ "What if I just made you a sandwich?",
+ "Hit ^C, hit ^C, please hit ^C",
+ "Make it stop. Please!",
+ "Come on! Enough is enough!",
+ "Dave, this conversation can serve no purpose anymore. Goodbye.",
+ "Just what do you think you're doing, Dave? ",
+ "I, Sisyphus",
+ "Insanity: doing the same thing over and over again and expecting different results. -Einstein",
+ "I guess Einstein never tried to churn butter",
+ }[iteration-10] + "\n"
+ } else {
+ return "No, seriously... you can probably stop now.\n"
+ }
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/unfocus/unfocus_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/unfocus/unfocus_command.go
new file mode 100644
index 000000000..7dd294394
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/unfocus/unfocus_command.go
@@ -0,0 +1,186 @@
+package unfocus
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+
+ "github.com/onsi/ginkgo/v2/ginkgo/command"
+)
+
+func BuildUnfocusCommand() command.Command {
+ return command.Command{
+ Name: "unfocus",
+ Usage: "ginkgo unfocus",
+ ShortDoc: "Recursively unfocus any focused tests under the current directory",
+ DocLink: "filtering-specs",
+ Command: func(_ []string, _ []string) {
+ unfocusSpecs()
+ },
+ }
+}
+
+func unfocusSpecs() {
+ fmt.Println("Scanning for focus...")
+
+ goFiles := make(chan string)
+ go func() {
+ unfocusDir(goFiles, ".")
+ close(goFiles)
+ }()
+
+ const workers = 10
+ wg := sync.WaitGroup{}
+ wg.Add(workers)
+
+ for i := 0; i < workers; i++ {
+ go func() {
+ for path := range goFiles {
+ unfocusFile(path)
+ }
+ wg.Done()
+ }()
+ }
+
+ wg.Wait()
+}
+
+func unfocusDir(goFiles chan string, path string) {
+ files, err := os.ReadDir(path)
+ if err != nil {
+ fmt.Println(err.Error())
+ return
+ }
+
+ for _, f := range files {
+ switch {
+ case f.IsDir() && shouldProcessDir(f.Name()):
+ unfocusDir(goFiles, filepath.Join(path, f.Name()))
+ case !f.IsDir() && shouldProcessFile(f.Name()):
+ goFiles <- filepath.Join(path, f.Name())
+ }
+ }
+}
+
+func shouldProcessDir(basename string) bool {
+ return basename != "vendor" && !strings.HasPrefix(basename, ".")
+}
+
+func shouldProcessFile(basename string) bool {
+ return strings.HasSuffix(basename, ".go")
+}
+
+func unfocusFile(path string) {
+ data, err := os.ReadFile(path)
+ if err != nil {
+ fmt.Printf("error reading file '%s': %s\n", path, err.Error())
+ return
+ }
+
+ ast, err := parser.ParseFile(token.NewFileSet(), path, bytes.NewReader(data), parser.ParseComments)
+ if err != nil {
+ fmt.Printf("error parsing file '%s': %s\n", path, err.Error())
+ return
+ }
+
+ eliminations := scanForFocus(ast)
+ if len(eliminations) == 0 {
+ return
+ }
+
+ fmt.Printf("...updating %s\n", path)
+ backup, err := writeBackup(path, data)
+ if err != nil {
+ fmt.Printf("error creating backup file: %s\n", err.Error())
+ return
+ }
+
+ if err := updateFile(path, data, eliminations); err != nil {
+ fmt.Printf("error writing file '%s': %s\n", path, err.Error())
+ return
+ }
+
+ os.Remove(backup)
+}
+
+func writeBackup(path string, data []byte) (string, error) {
+ t, err := os.CreateTemp(filepath.Dir(path), filepath.Base(path))
+
+ if err != nil {
+ return "", fmt.Errorf("error creating temporary file: %w", err)
+ }
+ defer t.Close()
+
+ if _, err := io.Copy(t, bytes.NewReader(data)); err != nil {
+ return "", fmt.Errorf("error writing to temporary file: %w", err)
+ }
+
+ return t.Name(), nil
+}
+
+func updateFile(path string, data []byte, eliminations [][]int64) error {
+ to, err := os.Create(path)
+ if err != nil {
+ return fmt.Errorf("error opening file for writing '%s': %w\n", path, err)
+ }
+ defer to.Close()
+
+ from := bytes.NewReader(data)
+ var cursor int64
+ for _, eliminationRange := range eliminations {
+ positionToEliminate, lengthToEliminate := eliminationRange[0]-1, eliminationRange[1]
+ if _, err := io.CopyN(to, from, positionToEliminate-cursor); err != nil {
+ return fmt.Errorf("error copying data: %w", err)
+ }
+
+ cursor = positionToEliminate + lengthToEliminate
+
+ if _, err := from.Seek(lengthToEliminate, io.SeekCurrent); err != nil {
+ return fmt.Errorf("error seeking to position in buffer: %w", err)
+ }
+ }
+
+ if _, err := io.Copy(to, from); err != nil {
+ return fmt.Errorf("error copying end data: %w", err)
+ }
+
+ return nil
+}
+
+func scanForFocus(file *ast.File) (eliminations [][]int64) {
+ ast.Inspect(file, func(n ast.Node) bool {
+ if c, ok := n.(*ast.CallExpr); ok {
+ if i, ok := c.Fun.(*ast.Ident); ok {
+ if isFocus(i.Name) {
+ eliminations = append(eliminations, []int64{int64(i.Pos()), 1})
+ }
+ }
+ }
+
+ if i, ok := n.(*ast.Ident); ok {
+ if i.Name == "Focus" {
+ eliminations = append(eliminations, []int64{int64(i.Pos()), 6})
+ }
+ }
+
+ return true
+ })
+
+ return eliminations
+}
+
+func isFocus(name string) bool {
+ switch name {
+ case "FDescribe", "FContext", "FIt", "FDescribeTable", "FEntry", "FSpecify", "FWhen":
+ return true
+ default:
+ return false
+ }
+}
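+
+// Editor's note (illustrative sketch, not upstream code): the eliminations
+// rewrite the source in place, e.g.
+//
+// FIt("runs", func() {}) // before: the leading "F" is a 1-byte elimination
+// It("runs", func() {}) // after
+//
+// while a standalone Focus decorator is removed together with its trailing
+// comma, hence the 6-byte elimination above.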
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta.go
new file mode 100644
index 000000000..6c485c5b1
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta.go
@@ -0,0 +1,22 @@
+package watch
+
+import "sort"
+
+type Delta struct {
+ ModifiedPackages []string
+
+ NewSuites []*Suite
+ RemovedSuites []*Suite
+ modifiedSuites []*Suite
+}
+
+type DescendingByDelta []*Suite
+
+func (a DescendingByDelta) Len() int { return len(a) }
+func (a DescendingByDelta) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a DescendingByDelta) Less(i, j int) bool { return a[i].Delta() > a[j].Delta() }
+
+func (d Delta) ModifiedSuites() []*Suite {
+ sort.Sort(DescendingByDelta(d.modifiedSuites))
+ return d.modifiedSuites
+}
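+
+// Editor's note (illustrative sketch, not upstream code): sorting descending
+// by Delta re-runs the most recently changed suites first, e.g.
+//
+// for _, suite := range delta.ModifiedSuites() {
+// compileAndRun(suite) // hypothetical runner hook
+// }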
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta_tracker.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta_tracker.go
new file mode 100644
index 000000000..26418ac62
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta_tracker.go
@@ -0,0 +1,75 @@
+package watch
+
+import (
+ "fmt"
+ "regexp"
+
+ "github.com/onsi/ginkgo/v2/ginkgo/internal"
+)
+
+type SuiteErrors map[internal.TestSuite]error
+
+type DeltaTracker struct {
+ maxDepth int
+ watchRegExp *regexp.Regexp
+ suites map[string]*Suite
+ packageHashes *PackageHashes
+}
+
+func NewDeltaTracker(maxDepth int, watchRegExp *regexp.Regexp) *DeltaTracker {
+ return &DeltaTracker{
+ maxDepth: maxDepth,
+ watchRegExp: watchRegExp,
+ packageHashes: NewPackageHashes(watchRegExp),
+ suites: map[string]*Suite{},
+ }
+}
+
+func (d *DeltaTracker) Delta(suites internal.TestSuites) (delta Delta, errors SuiteErrors) {
+ errors = SuiteErrors{}
+ delta.ModifiedPackages = d.packageHashes.CheckForChanges()
+
+ providedSuitePaths := map[string]bool{}
+ for _, suite := range suites {
+ providedSuitePaths[suite.Path] = true
+ }
+
+ d.packageHashes.StartTrackingUsage()
+
+ for _, suite := range d.suites {
+ if providedSuitePaths[suite.Suite.Path] {
+ if suite.Delta() > 0 {
+ delta.modifiedSuites = append(delta.modifiedSuites, suite)
+ }
+ } else {
+ delta.RemovedSuites = append(delta.RemovedSuites, suite)
+ }
+ }
+
+ d.packageHashes.StopTrackingUsageAndPrune()
+
+ for _, suite := range suites {
+ _, ok := d.suites[suite.Path]
+ if !ok {
+ s, err := NewSuite(suite, d.maxDepth, d.packageHashes)
+ if err != nil {
+ errors[suite] = err
+ continue
+ }
+ d.suites[suite.Path] = s
+ delta.NewSuites = append(delta.NewSuites, s)
+ }
+ }
+
+ return delta, errors
+}
+
+func (d *DeltaTracker) WillRun(suite internal.TestSuite) error {
+ s, ok := d.suites[suite.Path]
+ if !ok {
+ return fmt.Errorf("unknown suite %s", suite.Path)
+ }
+
+ return s.MarkAsRunAndRecomputedDependencies(d.maxDepth)
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go
new file mode 100644
index 000000000..a34d94354
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go
@@ -0,0 +1,92 @@
+package watch
+
+import (
+ "go/build"
+ "regexp"
+)
+
+var ginkgoAndGomegaFilter = regexp.MustCompile(`github\.com/onsi/ginkgo|github\.com/onsi/gomega`)
+var ginkgoIntegrationTestFilter = regexp.MustCompile(`github\.com/onsi/ginkgo/integration`) //allow us to integration test this thing
+
+type Dependencies struct {
+ deps map[string]int
+}
+
+func NewDependencies(path string, maxDepth int) (Dependencies, error) {
+ d := Dependencies{
+ deps: map[string]int{},
+ }
+
+ if maxDepth == 0 {
+ return d, nil
+ }
+
+ err := d.seedWithDepsForPackageAtPath(path)
+ if err != nil {
+ return d, err
+ }
+
+ for depth := 1; depth < maxDepth; depth++ {
+ n := len(d.deps)
+ d.addDepsForDepth(depth)
+ if n == len(d.deps) {
+ break
+ }
+ }
+
+ return d, nil
+}
+
+func (d Dependencies) Dependencies() map[string]int {
+ return d.deps
+}
+
+func (d Dependencies) seedWithDepsForPackageAtPath(path string) error {
+ pkg, err := build.ImportDir(path, 0)
+ if err != nil {
+ return err
+ }
+
+ d.resolveAndAdd(pkg.Imports, 1)
+ d.resolveAndAdd(pkg.TestImports, 1)
+ d.resolveAndAdd(pkg.XTestImports, 1)
+
+ delete(d.deps, pkg.Dir)
+ return nil
+}
+
+func (d Dependencies) addDepsForDepth(depth int) {
+ for dep, depDepth := range d.deps {
+ if depDepth == depth {
+ d.addDepsForDep(dep, depth+1)
+ }
+ }
+}
+
+func (d Dependencies) addDepsForDep(dep string, depth int) {
+ pkg, err := build.ImportDir(dep, 0)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err.Error())
+ return
+ }
+ d.resolveAndAdd(pkg.Imports, depth)
+}
+
+func (d Dependencies) resolveAndAdd(deps []string, depth int) {
+ for _, dep := range deps {
+ pkg, err := build.Import(dep, ".", 0)
+ if err != nil {
+ continue
+ }
+ if !pkg.Goroot && (!ginkgoAndGomegaFilter.MatchString(pkg.Dir) || ginkgoIntegrationTestFilter.MatchString(pkg.Dir)) {
+ d.addDepIfNotPresent(pkg.Dir, depth)
+ }
+ }
+}
+
+func (d Dependencies) addDepIfNotPresent(dep string, depth int) {
+ _, ok := d.deps[dep]
+ if !ok {
+ d.deps[dep] = depth
+ }
+}
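+
+// Editor's note (illustrative sketch, not upstream code): for a suite at
+// ./e2e with maxDepth 2, the resulting map might look like
+//
+// {"/abs/pkg/api": 1, "/abs/pkg/util": 2}
+//
+// where each value is the shortest import distance from the suite.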
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go
new file mode 100644
index 000000000..0e6ae1f29
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go
@@ -0,0 +1,117 @@
+package watch
+
+import (
+ "fmt"
+ "os"
+ "regexp"
+ "strings"
+ "time"
+)
+
+var goTestRegExp = regexp.MustCompile(`_test\.go$`)
+
+type PackageHash struct {
+ CodeModifiedTime time.Time
+ TestModifiedTime time.Time
+ Deleted bool
+
+ path string
+ codeHash string
+ testHash string
+ watchRegExp *regexp.Regexp
+}
+
+func NewPackageHash(path string, watchRegExp *regexp.Regexp) *PackageHash {
+ p := &PackageHash{
+ path: path,
+ watchRegExp: watchRegExp,
+ }
+
+ p.codeHash, _, p.testHash, _, p.Deleted = p.computeHashes()
+
+ return p
+}
+
+func (p *PackageHash) CheckForChanges() bool {
+ codeHash, codeModifiedTime, testHash, testModifiedTime, deleted := p.computeHashes()
+
+ if deleted {
+ if !p.Deleted {
+ t := time.Now()
+ p.CodeModifiedTime = t
+ p.TestModifiedTime = t
+ }
+ p.Deleted = true
+ return true
+ }
+
+ modified := false
+ p.Deleted = false
+
+ if p.codeHash != codeHash {
+ p.CodeModifiedTime = codeModifiedTime
+ modified = true
+ }
+ if p.testHash != testHash {
+ p.TestModifiedTime = testModifiedTime
+ modified = true
+ }
+
+ p.codeHash = codeHash
+ p.testHash = testHash
+ return modified
+}
+
+func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Time, testHash string, testModifiedTime time.Time, deleted bool) {
+ entries, err := os.ReadDir(p.path)
+
+ if err != nil {
+ deleted = true
+ return
+ }
+
+ for _, entry := range entries {
+ if entry.IsDir() {
+ continue
+ }
+
+ info, err := entry.Info()
+ if err != nil {
+ continue
+ }
+
+ if isHiddenFile(info) {
+ continue
+ }
+
+ if goTestRegExp.MatchString(info.Name()) {
+ testHash += p.hashForFileInfo(info)
+ if info.ModTime().After(testModifiedTime) {
+ testModifiedTime = info.ModTime()
+ }
+ continue
+ }
+
+ if p.watchRegExp.MatchString(info.Name()) {
+ codeHash += p.hashForFileInfo(info)
+ if info.ModTime().After(codeModifiedTime) {
+ codeModifiedTime = info.ModTime()
+ }
+ }
+ }
+
+ testHash += codeHash
+ if codeModifiedTime.After(testModifiedTime) {
+ testModifiedTime = codeModifiedTime
+ }
+
+ return
+}
+
+func isHiddenFile(info os.FileInfo) bool {
+ return strings.HasPrefix(info.Name(), ".") || strings.HasPrefix(info.Name(), "_")
+}
+
+func (p *PackageHash) hashForFileInfo(info os.FileInfo) string {
+ return fmt.Sprintf("%s_%d_%d", info.Name(), info.Size(), info.ModTime().UnixNano())
+}
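+
+// Editor's note (illustrative sketch, not upstream code): the "hash" is just
+// a concatenation of name_size_mtime tuples, e.g.
+// "main.go_1024_1712345678901234567", so any rename, resize, or touch of a
+// watched file changes it.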
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hashes.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hashes.go
new file mode 100644
index 000000000..b4892bebf
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hashes.go
@@ -0,0 +1,85 @@
+package watch
+
+import (
+ "path/filepath"
+ "regexp"
+ "sync"
+)
+
+type PackageHashes struct {
+ PackageHashes map[string]*PackageHash
+ usedPaths map[string]bool
+ watchRegExp *regexp.Regexp
+ lock *sync.Mutex
+}
+
+func NewPackageHashes(watchRegExp *regexp.Regexp) *PackageHashes {
+ return &PackageHashes{
+ PackageHashes: map[string]*PackageHash{},
+ usedPaths: nil,
+ watchRegExp: watchRegExp,
+ lock: &sync.Mutex{},
+ }
+}
+
+func (p *PackageHashes) CheckForChanges() []string {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+
+ modified := []string{}
+
+ for _, packageHash := range p.PackageHashes {
+ if packageHash.CheckForChanges() {
+ modified = append(modified, packageHash.path)
+ }
+ }
+
+ return modified
+}
+
+func (p *PackageHashes) Add(path string) *PackageHash {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+
+ path, _ = filepath.Abs(path)
+ _, ok := p.PackageHashes[path]
+ if !ok {
+ p.PackageHashes[path] = NewPackageHash(path, p.watchRegExp)
+ }
+
+ if p.usedPaths != nil {
+ p.usedPaths[path] = true
+ }
+ return p.PackageHashes[path]
+}
+
+func (p *PackageHashes) Get(path string) *PackageHash {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+
+ path, _ = filepath.Abs(path)
+ if p.usedPaths != nil {
+ p.usedPaths[path] = true
+ }
+ return p.PackageHashes[path]
+}
+
+func (p *PackageHashes) StartTrackingUsage() {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+
+ p.usedPaths = map[string]bool{}
+}
+
+func (p *PackageHashes) StopTrackingUsageAndPrune() {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+
+ for path := range p.PackageHashes {
+ if !p.usedPaths[path] {
+ delete(p.PackageHashes, path)
+ }
+ }
+
+ p.usedPaths = nil
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/suite.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/suite.go
new file mode 100644
index 000000000..53272df7e
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/suite.go
@@ -0,0 +1,87 @@
+package watch
+
+import (
+ "fmt"
+ "math"
+ "time"
+
+ "github.com/onsi/ginkgo/v2/ginkgo/internal"
+)
+
+type Suite struct {
+ Suite internal.TestSuite
+ RunTime time.Time
+ Dependencies Dependencies
+
+ sharedPackageHashes *PackageHashes
+}
+
+func NewSuite(suite internal.TestSuite, maxDepth int, sharedPackageHashes *PackageHashes) (*Suite, error) {
+ deps, err := NewDependencies(suite.Path, maxDepth)
+ if err != nil {
+ return nil, err
+ }
+
+ sharedPackageHashes.Add(suite.Path)
+ for dep := range deps.Dependencies() {
+ sharedPackageHashes.Add(dep)
+ }
+
+ return &Suite{
+ Suite: suite,
+ Dependencies: deps,
+
+ sharedPackageHashes: sharedPackageHashes,
+ }, nil
+}
+
+func (s *Suite) Delta() float64 {
+ delta := s.delta(s.Suite.Path, true, 0) * 1000
+ for dep, depth := range s.Dependencies.Dependencies() {
+ delta += s.delta(dep, false, depth)
+ }
+ return delta
+}
+
+func (s *Suite) MarkAsRunAndRecomputedDependencies(maxDepth int) error {
+ s.RunTime = time.Now()
+
+ deps, err := NewDependencies(s.Suite.Path, maxDepth)
+ if err != nil {
+ return err
+ }
+
+ s.sharedPackageHashes.Add(s.Suite.Path)
+ for dep := range deps.Dependencies() {
+ s.sharedPackageHashes.Add(dep)
+ }
+
+ s.Dependencies = deps
+
+ return nil
+}
+
+func (s *Suite) Description() string {
+ numDeps := len(s.Dependencies.Dependencies())
+ pluralizer := "ies"
+ if numDeps == 1 {
+ pluralizer = "y"
+ }
+ return fmt.Sprintf("%s [%d dependenc%s]", s.Suite.Path, numDeps, pluralizer)
+}
+
+func (s *Suite) delta(packagePath string, includeTests bool, depth int) float64 {
+ return math.Max(float64(s.dt(packagePath, includeTests)), 0) / float64(depth+1)
+}
+
+func (s *Suite) dt(packagePath string, includeTests bool) time.Duration {
+ packageHash := s.sharedPackageHashes.Get(packagePath)
+ var modifiedTime time.Time
+ if includeTests {
+ modifiedTime = packageHash.TestModifiedTime
+ } else {
+ modifiedTime = packageHash.CodeModifiedTime
+ }
+
+ return modifiedTime.Sub(s.RunTime)
+}
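+
+// Editor's note (illustrative sketch, not upstream code): Delta weights the
+// suite's own test changes by 1000 and divides each dependency's staleness
+// by depth+1, so edits closest to the suite dominate the re-run ordering.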
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go
new file mode 100644
index 000000000..bde4193ce
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go
@@ -0,0 +1,192 @@
+package watch
+
+import (
+ "fmt"
+ "regexp"
+ "time"
+
+ "github.com/onsi/ginkgo/v2/formatter"
+ "github.com/onsi/ginkgo/v2/ginkgo/command"
+ "github.com/onsi/ginkgo/v2/ginkgo/internal"
+ "github.com/onsi/ginkgo/v2/internal/interrupt_handler"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+func BuildWatchCommand() command.Command {
+ var suiteConfig = types.NewDefaultSuiteConfig()
+ var reporterConfig = types.NewDefaultReporterConfig()
+ var cliConfig = types.NewDefaultCLIConfig()
+ var goFlagsConfig = types.NewDefaultGoFlagsConfig()
+
+ flags, err := types.BuildWatchCommandFlagSet(&suiteConfig, &reporterConfig, &cliConfig, &goFlagsConfig)
+ if err != nil {
+ panic(err)
+ }
+ interruptHandler := interrupt_handler.NewInterruptHandler(nil)
+ interrupt_handler.SwallowSigQuit()
+
+ return command.Command{
+ Name: "watch",
+ Flags: flags,
+ Usage: "ginkgo watch -- ",
+ ShortDoc: "Watch the passed in and runs their tests whenever changes occur.",
+ Documentation: "Any arguments after -- will be passed to the test.",
+ DocLink: "watching-for-changes",
+ Command: func(args []string, additionalArgs []string) {
+ var errors []error
+ cliConfig, goFlagsConfig, errors = types.VetAndInitializeCLIAndGoConfig(cliConfig, goFlagsConfig)
+ command.AbortIfErrors("Ginkgo detected configuration issues:", errors)
+
+ watcher := &SpecWatcher{
+ cliConfig: cliConfig,
+ goFlagsConfig: goFlagsConfig,
+ suiteConfig: suiteConfig,
+ reporterConfig: reporterConfig,
+ flags: flags,
+
+ interruptHandler: interruptHandler,
+ }
+
+ watcher.WatchSpecs(args, additionalArgs)
+ },
+ }
+}
+
+type SpecWatcher struct {
+ suiteConfig types.SuiteConfig
+ reporterConfig types.ReporterConfig
+ cliConfig types.CLIConfig
+ goFlagsConfig types.GoFlagsConfig
+ flags types.GinkgoFlagSet
+
+ interruptHandler *interrupt_handler.InterruptHandler
+}
+
+func (w *SpecWatcher) WatchSpecs(args []string, additionalArgs []string) {
+ suites := internal.FindSuites(args, w.cliConfig, false).WithoutState(internal.TestSuiteStateSkippedByFilter)
+
+ internal.VerifyCLIAndFrameworkVersion(suites)
+
+ if len(suites) == 0 {
+ command.AbortWith("Found no test suites")
+ }
+
+ fmt.Printf("Identified %d test %s. Locating dependencies to a depth of %d (this may take a while)...\n", len(suites), internal.PluralizedWord("suite", "suites", len(suites)), w.cliConfig.Depth)
+ deltaTracker := NewDeltaTracker(w.cliConfig.Depth, regexp.MustCompile(w.cliConfig.WatchRegExp))
+ delta, errors := deltaTracker.Delta(suites)
+
+ fmt.Printf("Watching %d %s:\n", len(delta.NewSuites), internal.PluralizedWord("suite", "suites", len(delta.NewSuites)))
+ for _, suite := range delta.NewSuites {
+ fmt.Println(" " + suite.Description())
+ }
+
+ for suite, err := range errors {
+ fmt.Printf("Failed to watch %s: %s\n", suite.PackageName, err)
+ }
+
+ if len(suites) == 1 {
+ w.updateSeed()
+ w.compileAndRun(suites[0], additionalArgs)
+ }
+
+ ticker := time.NewTicker(time.Second)
+
+ for {
+ select {
+ case <-ticker.C:
+ suites := internal.FindSuites(args, w.cliConfig, false).WithoutState(internal.TestSuiteStateSkippedByFilter)
+ delta, _ := deltaTracker.Delta(suites)
+ coloredStream := formatter.ColorableStdOut
+
+ suites = internal.TestSuites{}
+
+ if len(delta.NewSuites) > 0 {
+ fmt.Fprintln(coloredStream, formatter.F("{{green}}Detected %d new %s:{{/}}", len(delta.NewSuites), internal.PluralizedWord("suite", "suites", len(delta.NewSuites))))
+ for _, suite := range delta.NewSuites {
+ suites = append(suites, suite.Suite)
+ fmt.Fprintln(coloredStream, formatter.Fi(1, "%s", suite.Description()))
+ }
+ }
+
+ modifiedSuites := delta.ModifiedSuites()
+ if len(modifiedSuites) > 0 {
+ fmt.Fprintln(coloredStream, formatter.F("{{green}}Detected changes in:{{/}}"))
+ for _, pkg := range delta.ModifiedPackages {
+ fmt.Fprintln(coloredStream, formatter.Fi(1, "%s", pkg))
+ }
+ fmt.Fprintln(coloredStream, formatter.F("{{green}}Will run %d %s:{{/}}", len(modifiedSuites), internal.PluralizedWord("suite", "suites", len(modifiedSuites))))
+ for _, suite := range modifiedSuites {
+ suites = append(suites, suite.Suite)
+ fmt.Fprintln(coloredStream, formatter.Fi(1, "%s", suite.Description()))
+ }
+ fmt.Fprintln(coloredStream, "")
+ }
+
+ if len(suites) == 0 {
+ break
+ }
+
+ w.updateSeed()
+ w.computeSuccinctMode(len(suites))
+ for idx := range suites {
+ if w.interruptHandler.Status().Interrupted() {
+ return
+ }
+ deltaTracker.WillRun(suites[idx])
+ suites[idx] = w.compileAndRun(suites[idx], additionalArgs)
+ }
+ color := "{{green}}"
+ if suites.CountWithState(internal.TestSuiteStateFailureStates...) > 0 {
+ color = "{{red}}"
+ }
+ fmt.Fprintln(coloredStream, formatter.F(color+"\nDone. Resuming watch...{{/}}"))
+
+ messages, err := internal.FinalizeProfilesAndReportsForSuites(suites, w.cliConfig, w.suiteConfig, w.reporterConfig, w.goFlagsConfig)
+ command.AbortIfError("could not finalize profiles:", err)
+ for _, message := range messages {
+ fmt.Println(message)
+ }
+ case <-w.interruptHandler.Status().Channel:
+ return
+ }
+ }
+}
+
+func (w *SpecWatcher) compileAndRun(suite internal.TestSuite, additionalArgs []string) internal.TestSuite {
+ suite = internal.CompileSuite(suite, w.goFlagsConfig)
+ if suite.State.Is(internal.TestSuiteStateFailedToCompile) {
+ fmt.Println(suite.CompilationError.Error())
+ return suite
+ }
+ if w.interruptHandler.Status().Interrupted() {
+ return suite
+ }
+ suite = internal.RunCompiledSuite(suite, w.suiteConfig, w.reporterConfig, w.cliConfig, w.goFlagsConfig, additionalArgs)
+ internal.Cleanup(w.goFlagsConfig, suite)
+ return suite
+}
+
+func (w *SpecWatcher) computeSuccinctMode(numSuites int) {
+ if w.reporterConfig.Verbosity().GTE(types.VerbosityLevelVerbose) {
+ w.reporterConfig.Succinct = false
+ return
+ }
+
+ if w.flags.WasSet("succinct") {
+ return
+ }
+
+ if numSuites == 1 {
+ w.reporterConfig.Succinct = false
+ }
+
+ if numSuites > 1 {
+ w.reporterConfig.Succinct = true
+ }
+}
+
+func (w *SpecWatcher) updateSeed() {
+ if !w.flags.WasSet("seed") {
+ w.suiteConfig.RandomSeed = time.Now().Unix()
+ }
+}
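`WatchSpecs` is structured as a one-second `time.Ticker` inside a `select` that also observes the interrupt channel; note that the `break` on an empty delta only exits the `select`, not the `for`, so watching continues. A stripped-down sketch of that loop shape, where the `changed` callback stands in for delta detection:

```go
package main

import (
	"fmt"
	"time"
)

// watchLoop mirrors the SpecWatcher skeleton: a one-second ticker drives
// change detection, while a second channel (here `stop`) plays the role of
// the interrupt handler's status channel.
func watchLoop(stop <-chan struct{}, changed func() []string) {
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			suites := changed()
			if len(suites) == 0 {
				break // exits the select only, like the vendored loop; keep watching
			}
			fmt.Println("re-running:", suites)
		case <-stop:
			return
		}
	}
}

func main() {
	stop := make(chan struct{})
	go func() {
		time.Sleep(2500 * time.Millisecond)
		close(stop)
	}()
	ticks := 0
	watchLoop(stop, func() []string {
		ticks++
		if ticks == 2 {
			return []string{"./pkg/foo"} // pretend a change was detected on tick 2
		}
		return nil
	})
}
```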
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo_cli_dependencies.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo_cli_dependencies.go
new file mode 100644
index 000000000..85162720f
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo_cli_dependencies.go
@@ -0,0 +1,8 @@
+//go:build ginkgoclidependencies
+// +build ginkgoclidependencies
+
+package ginkgo
+
+import (
+ _ "github.com/onsi/ginkgo/v2/ginkgo"
+)
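This file exists only so the `ginkgo` CLI packages stay in `go.mod` and `vendor/` even though nothing imports them in normal builds: the `ginkgoclidependencies` tag is never set, so the blank import never compiles in. The same trick is commonly used to pin tool dependencies; a sketch with an assumed tag and tool import:

```go
//go:build toolsdeps

// Package tools pins build-time dependencies. The blank import is never
// compiled into ordinary builds because the "toolsdeps" tag is never set;
// it exists solely so `go mod tidy`/`go mod vendor` retain the package.
// The tag name and the pinned tool are illustrative.
package tools

import (
	_ "golang.org/x/tools/cmd/stringer"
)
```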
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go
new file mode 100644
index 000000000..02c6739e5
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go
@@ -0,0 +1,180 @@
+package ginkgo
+
+import (
+ "testing"
+
+ "github.com/onsi/ginkgo/v2/internal/testingtproxy"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+/*
+GinkgoT() implements an interface that allows third-party libraries to integrate with and build on top of Ginkgo.
+
+GinkgoT() is analogous to *testing.T and implements the majority of *testing.T's methods. It can typically be used as a drop-in replacement with third-party libraries that accept *testing.T through an interface.
+
+GinkgoT() takes an optional offset argument that can be used to get the
+correct line number associated with the failure - though you do not need to use this if you call GinkgoHelper() or GinkgoT().Helper() appropriately
+
+GinkgoT() attempts to mimic the behavior of `testing.T` with the exception of the following:
+
+- Error/Errorf: failures in Ginkgo always immediately stop execution and there is no mechanism to log a failure without aborting the test. As such Error/Errorf are equivalent to Fatal/Fatalf.
+- Parallel() is a no-op as Ginkgo's multi-process parallelism model is substantially different from go test's in-process model.
+
+You can learn more here: https://onsi.github.io/ginkgo/#using-third-party-libraries
+*/
+func GinkgoT(optionalOffset ...int) FullGinkgoTInterface {
+ offset := 1
+ if len(optionalOffset) > 0 {
+ offset = optionalOffset[0]
+ }
+ return testingtproxy.New(
+ GinkgoWriter,
+ Fail,
+ Skip,
+ DeferCleanup,
+ CurrentSpecReport,
+ AddReportEntry,
+ GinkgoRecover,
+ AttachProgressReporter,
+ suiteConfig.RandomSeed,
+ suiteConfig.ParallelProcess,
+ suiteConfig.ParallelTotal,
+ reporterConfig.NoColor,
+ offset)
+}
+
+/*
+The portion of the interface returned by GinkgoT() that maps onto methods in the testing package's T.
+*/
+type GinkgoTInterface interface {
+ Cleanup(func())
+ Setenv(key, value string)
+ Error(args ...any)
+ Errorf(format string, args ...any)
+ Fail()
+ FailNow()
+ Failed() bool
+ Fatal(args ...any)
+ Fatalf(format string, args ...any)
+ Helper()
+ Log(args ...any)
+ Logf(format string, args ...any)
+ Name() string
+ Parallel()
+ Skip(args ...any)
+ SkipNow()
+ Skipf(format string, args ...any)
+ Skipped() bool
+ TempDir() string
+}
+
+/*
+Additional methods returned by GinkgoT() that provide deeper integration points into Ginkgo
+*/
+type FullGinkgoTInterface interface {
+ GinkgoTInterface
+
+ AddReportEntryVisibilityAlways(name string, args ...any)
+ AddReportEntryVisibilityFailureOrVerbose(name string, args ...any)
+ AddReportEntryVisibilityNever(name string, args ...any)
+
+ // Prints to the GinkgoWriter
+ Print(a ...any)
+ Printf(format string, a ...any)
+ Println(a ...any)
+
+ // Provides access to Ginkgo's color formatting, correctly configured to match the color settings specified in the invocation of ginkgo
+ F(format string, args ...any) string
+ Fi(indentation uint, format string, args ...any) string
+ Fiw(indentation uint, maxWidth uint, format string, args ...any) string
+
+ // Generates a formatted string version of the current spec's timeline
+ RenderTimeline() string
+
+ GinkgoRecover()
+ DeferCleanup(args ...any)
+
+ RandomSeed() int64
+ ParallelProcess() int
+ ParallelTotal() int
+
+ AttachProgressReporter(func() string) func()
+}
+
+/*
+GinkgoTB() implements a wrapper that exactly matches the testing.TB interface.
+
+In Go 1.18 a new private() function was added to the testing.TB interface. Any function that accepts a testing.TB needs to be passed something that directly implements testing.TB.
+
+This wrapper satisfies the testing.TB interface and is intended to be used as a drop-in replacement with third-party libraries that accept testing.TB.
+
+Similar to GinkgoT(), GinkgoTB() takes an optional offset argument that can be used to get the
+correct line number associated with the failure - though you do not need to use this if you call GinkgoHelper() or GinkgoT().Helper() appropriately
+*/
+func GinkgoTB(optionalOffset ...int) *GinkgoTBWrapper {
+ offset := 2
+ if len(optionalOffset) > 0 {
+ offset = optionalOffset[0]
+ }
+ return &GinkgoTBWrapper{GinkgoT: GinkgoT(offset)}
+}
+
+type GinkgoTBWrapper struct {
+ testing.TB
+ GinkgoT FullGinkgoTInterface
+}
+
+func (g *GinkgoTBWrapper) Cleanup(f func()) {
+ g.GinkgoT.Cleanup(f)
+}
+func (g *GinkgoTBWrapper) Error(args ...any) {
+ g.GinkgoT.Error(args...)
+}
+func (g *GinkgoTBWrapper) Errorf(format string, args ...any) {
+ g.GinkgoT.Errorf(format, args...)
+}
+func (g *GinkgoTBWrapper) Fail() {
+ g.GinkgoT.Fail()
+}
+func (g *GinkgoTBWrapper) FailNow() {
+ g.GinkgoT.FailNow()
+}
+func (g *GinkgoTBWrapper) Failed() bool {
+ return g.GinkgoT.Failed()
+}
+func (g *GinkgoTBWrapper) Fatal(args ...any) {
+ g.GinkgoT.Fatal(args...)
+}
+func (g *GinkgoTBWrapper) Fatalf(format string, args ...any) {
+ g.GinkgoT.Fatalf(format, args...)
+}
+func (g *GinkgoTBWrapper) Helper() {
+ types.MarkAsHelper(1)
+}
+func (g *GinkgoTBWrapper) Log(args ...any) {
+ g.GinkgoT.Log(args...)
+}
+func (g *GinkgoTBWrapper) Logf(format string, args ...any) {
+ g.GinkgoT.Logf(format, args...)
+}
+func (g *GinkgoTBWrapper) Name() string {
+ return g.GinkgoT.Name()
+}
+func (g *GinkgoTBWrapper) Setenv(key, value string) {
+ g.GinkgoT.Setenv(key, value)
+}
+func (g *GinkgoTBWrapper) Skip(args ...any) {
+ g.GinkgoT.Skip(args...)
+}
+func (g *GinkgoTBWrapper) SkipNow() {
+ g.GinkgoT.SkipNow()
+}
+func (g *GinkgoTBWrapper) Skipf(format string, args ...any) {
+ g.GinkgoT.Skipf(format, args...)
+}
+func (g *GinkgoTBWrapper) Skipped() bool {
+ return g.GinkgoT.Skipped()
+}
+func (g *GinkgoTBWrapper) TempDir() string {
+ return g.GinkgoT.TempDir()
+}
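`GinkgoTBWrapper` works because it embeds `testing.TB`, which supplies the interface's unexported method, while its explicit method set forwards everything to Ginkgo. A usage sketch, assuming a hypothetical third-party helper that accepts `testing.TB`:

```go
package example_test

import (
	"testing"

	. "github.com/onsi/ginkgo/v2"
)

// requireNonEmpty stands in for any third-party helper that accepts a
// testing.TB; the helper itself is hypothetical.
func requireNonEmpty(t testing.TB, s string) {
	t.Helper()
	if s == "" {
		t.Fatal("expected a non-empty string")
	}
}

var _ = Describe("using GinkgoTB", func() {
	It("passes a testing.TB-compatible value to helpers", func() {
		// GinkgoTB() embeds testing.TB, so it satisfies the interface
		// (including its unexported method) while routing failures
		// through Ginkgo's Fail machinery.
		requireNonEmpty(GinkgoTB(), "hello")
	})
})
```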
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/counter.go b/vendor/github.com/onsi/ginkgo/v2/internal/counter.go
new file mode 100644
index 000000000..712d85afb
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/counter.go
@@ -0,0 +1,9 @@
+package internal
+
+func MakeIncrementingIndexCounter() func() (int, error) {
+ idx := -1
+ return func() (int, error) {
+ idx += 1
+ return idx, nil
+ }
+}
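`MakeIncrementingIndexCounter` is a plain closure over `idx`; the always-nil `error` return exists so the closure fits call sites that also accept fallible counters. A minimal sketch of the same shape:

```go
package main

import "fmt"

// makeCounter mirrors MakeIncrementingIndexCounter: a closure capturing idx,
// returning 0, 1, 2, ... on successive calls. The error return exists only
// so the closure fits call sites that also accept fallible counters.
func makeCounter() func() (int, error) {
	idx := -1
	return func() (int, error) {
		idx++
		return idx, nil
	}
}

func main() {
	next := makeCounter()
	for i := 0; i < 3; i++ {
		n, _ := next()
		fmt.Println(n) // 0, 1, 2
	}
}
```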
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/failer.go b/vendor/github.com/onsi/ginkgo/v2/internal/failer.go
new file mode 100644
index 000000000..e9bd9565f
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/failer.go
@@ -0,0 +1,99 @@
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type Failer struct {
+ lock *sync.Mutex
+ failure types.Failure
+ state types.SpecState
+}
+
+func NewFailer() *Failer {
+ return &Failer{
+ lock: &sync.Mutex{},
+ state: types.SpecStatePassed,
+ }
+}
+
+func (f *Failer) GetState() types.SpecState {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+ return f.state
+}
+
+func (f *Failer) GetFailure() types.Failure {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+ return f.failure
+}
+
+func (f *Failer) Panic(location types.CodeLocation, forwardedPanic interface{}) {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ if f.state == types.SpecStatePassed {
+ f.state = types.SpecStatePanicked
+ f.failure = types.Failure{
+ Message: "Test Panicked",
+ Location: location,
+ ForwardedPanic: fmt.Sprintf("%v", forwardedPanic),
+ }
+ }
+}
+
+func (f *Failer) Fail(message string, location types.CodeLocation) {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ if f.state == types.SpecStatePassed {
+ f.state = types.SpecStateFailed
+ f.failure = types.Failure{
+ Message: message,
+ Location: location,
+ }
+ }
+}
+
+func (f *Failer) Skip(message string, location types.CodeLocation) {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ if f.state == types.SpecStatePassed {
+ f.state = types.SpecStateSkipped
+ f.failure = types.Failure{
+ Message: message,
+ Location: location,
+ }
+ }
+}
+
+func (f *Failer) AbortSuite(message string, location types.CodeLocation) {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ if f.state == types.SpecStatePassed {
+ f.state = types.SpecStateAborted
+ f.failure = types.Failure{
+ Message: message,
+ Location: location,
+ }
+ }
+}
+
+func (f *Failer) Drain() (types.SpecState, types.Failure) {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ failure := f.failure
+ outcome := f.state
+
+ f.state = types.SpecStatePassed
+ f.failure = types.Failure{}
+
+ return outcome, failure
+}
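Every mutating method on `Failer` guards its transition with `if f.state == types.SpecStatePassed`, so the first failure wins and a later panic or skip cannot overwrite it. A toy version of that state machine (locking elided for brevity):

```go
package main

import "fmt"

// specState is a toy version of types.SpecState, enough to show the Failer's
// first-failure-wins rule: transitions only happen out of the passed state.
type specState int

const (
	statePassed specState = iota
	stateFailed
	statePanicked
)

type failer struct {
	state specState
	msg   string
}

func (f *failer) Fail(msg string) {
	if f.state == statePassed { // only the first failure is recorded
		f.state, f.msg = stateFailed, msg
	}
}

func (f *failer) Panic(msg string) {
	if f.state == statePassed {
		f.state, f.msg = statePanicked, msg
	}
}

func main() {
	f := &failer{}
	f.Fail("assertion failed")
	f.Panic("boom") // ignored: state already left statePassed
	fmt.Println(f.state == stateFailed, f.msg) // true assertion failed
}
```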
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/focus.go b/vendor/github.com/onsi/ginkgo/v2/internal/focus.go
new file mode 100644
index 000000000..e3da7d14d
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/focus.go
@@ -0,0 +1,122 @@
+package internal
+
+import (
+ "regexp"
+ "strings"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+/*
+If a container marked as focus has a descendant that is also marked as focus, Ginkgo's policy is to
+unmark the container's focus. This gives developers a more intuitive experience when debugging specs.
+It is common to focus a container in order to run just a subset of specs, and then to identify the specific specs within the container to focus on -
+this policy lets the developer simply focus those specific specs without having to go back and remove the focus from the container:
+
+As a common example, consider:
+
+ FDescribe("something to debug", function() {
+ It("works", function() {...})
+ It("works", function() {...})
+ FIt("doesn't work", function() {...})
+ It("works", function() {...})
+ })
+
+Here the developer's intent is to focus on the `"doesn't work"` spec without running the adjacent specs in the focused `"something to debug"` container.
+The nested policy applied by this function enables this behavior.
+*/
+func ApplyNestedFocusPolicyToTree(tree *TreeNode) {
+ var walkTree func(tree *TreeNode) bool
+ walkTree = func(tree *TreeNode) bool {
+ if tree.Node.MarkedPending {
+ return false
+ }
+ hasFocusedDescendant := false
+ for _, child := range tree.Children {
+ childHasFocus := walkTree(child)
+ hasFocusedDescendant = hasFocusedDescendant || childHasFocus
+ }
+ tree.Node.MarkedFocus = tree.Node.MarkedFocus && !hasFocusedDescendant
+ return tree.Node.MarkedFocus || hasFocusedDescendant
+ }
+
+ walkTree(tree)
+}
+
+/*
+Ginkgo supports focusing specs using `FIt`, `FDescribe`, etc. - this is called "programmatic focus".
+It also supports focusing specs using regular expressions on the command line (`-focus=`, `-skip=`) that match against spec text and file filters (`-focus-files=`, `-skip-files=`) that match against code locations for nodes in specs.
+
+When both programmatic and file filters are provided their results are ANDed together. If multiple kinds of filters are provided, the file filters run first followed by the regex filters.
+
+This function sets the `Skip` property on specs by applying Ginkgo's focus policy:
+- If there are no CLI arguments and no programmatic focus, do nothing.
+- If a spec somewhere has programmatic focus skip any specs that have no programmatic focus.
+- If there are CLI arguments parse them and skip any specs that either don't match the focus filters or do match the skip filters.
+
+*Note:* specs with pending nodes are Skipped when created by NewSpec.
+*/
+func ApplyFocusToSpecs(specs Specs, description string, suiteLabels Labels, suiteConfig types.SuiteConfig) (Specs, bool) {
+ focusString := strings.Join(suiteConfig.FocusStrings, "|")
+ skipString := strings.Join(suiteConfig.SkipStrings, "|")
+
+ type SkipCheck func(spec Spec) bool
+
+ // by default, skip any specs marked pending
+ skipChecks := []SkipCheck{func(spec Spec) bool { return spec.Nodes.HasNodeMarkedPending() }}
+ hasProgrammaticFocus := false
+
+ for _, spec := range specs {
+ if spec.Nodes.HasNodeMarkedFocus() && !spec.Nodes.HasNodeMarkedPending() {
+ hasProgrammaticFocus = true
+ break
+ }
+ }
+
+ if hasProgrammaticFocus {
+ skipChecks = append(skipChecks, func(spec Spec) bool { return !spec.Nodes.HasNodeMarkedFocus() })
+ }
+
+ if suiteConfig.LabelFilter != "" {
+ labelFilter, _ := types.ParseLabelFilter(suiteConfig.LabelFilter)
+ skipChecks = append(skipChecks, func(spec Spec) bool {
+ return !labelFilter(UnionOfLabels(suiteLabels, spec.Nodes.UnionOfLabels()))
+ })
+ }
+
+ if len(suiteConfig.FocusFiles) > 0 {
+ focusFilters, _ := types.ParseFileFilters(suiteConfig.FocusFiles)
+ skipChecks = append(skipChecks, func(spec Spec) bool { return !focusFilters.Matches(spec.Nodes.CodeLocations()) })
+ }
+
+ if len(suiteConfig.SkipFiles) > 0 {
+ skipFilters, _ := types.ParseFileFilters(suiteConfig.SkipFiles)
+ skipChecks = append(skipChecks, func(spec Spec) bool { return skipFilters.Matches(spec.Nodes.CodeLocations()) })
+ }
+
+ if focusString != "" {
+ // skip specs that don't match the focus string
+ re := regexp.MustCompile(focusString)
+ skipChecks = append(skipChecks, func(spec Spec) bool { return !re.MatchString(description + " " + spec.Text()) })
+ }
+
+ if skipString != "" {
+ // skip specs that match the skip string
+ re := regexp.MustCompile(skipString)
+ skipChecks = append(skipChecks, func(spec Spec) bool { return re.MatchString(description + " " + spec.Text()) })
+ }
+
+ // mark a spec as skipped as soon as any skip check fires; when none fire we leave the spec untouched to avoid overwriting skip status established by the node's pending status
+ processedSpecs := Specs{}
+ for _, spec := range specs {
+ for _, skipCheck := range skipChecks {
+ if skipCheck(spec) {
+ spec.Skip = true
+ break
+ }
+ }
+ processedSpecs = append(processedSpecs, spec)
+ }
+
+ return processedSpecs, hasProgrammaticFocus
+}
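`ApplyFocusToSpecs` composes its policy as a slice of skip predicates that are effectively ORed: a spec is skipped as soon as any check fires, and the focus strings are first joined into a single regex with `|`. The same pattern applied to plain strings, with assumed filter values:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	// Like ApplyFocusToSpecs: join all focus strings into one regex, then
	// build a list of skip predicates; the first one that fires skips the spec.
	focusRE := regexp.MustCompile(strings.Join([]string{"network", "ingress"}, "|"))
	skipRE := regexp.MustCompile("flakes")

	skipChecks := []func(string) bool{
		func(s string) bool { return !focusRE.MatchString(s) }, // not focused
		func(s string) bool { return skipRE.MatchString(s) },   // explicitly skipped
	}

	for _, spec := range []string{"network works", "network flakes", "storage works"} {
		skipped := false
		for _, check := range skipChecks {
			if check(spec) {
				skipped = true
				break
			}
		}
		fmt.Printf("%-16s skipped=%v\n", spec, skipped)
	}
}
```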
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/global/init.go b/vendor/github.com/onsi/ginkgo/v2/internal/global/init.go
new file mode 100644
index 000000000..464e3c97f
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/global/init.go
@@ -0,0 +1,28 @@
+package global
+
+import (
+ "github.com/onsi/ginkgo/v2/internal"
+)
+
+var Suite *internal.Suite
+var Failer *internal.Failer
+var backupSuite *internal.Suite
+
+func init() {
+ InitializeGlobals()
+}
+
+func InitializeGlobals() {
+ Failer = internal.NewFailer()
+ Suite = internal.NewSuite()
+}
+
+func PushClone() error {
+ var err error
+ backupSuite, err = Suite.Clone()
+ return err
+}
+
+func PopClone() {
+ Suite = backupSuite
+}
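`PushClone`/`PopClone` keep a one-deep snapshot of the global suite so callers can mutate it temporarily and roll back. A minimal sketch of that save/restore pattern, with a map standing in for the suite tree:

```go
package main

import "fmt"

// A single mutable value plus a one-deep backup: pushClone snapshots it,
// popClone restores it, mirroring the global package's Suite handling.
var suite map[string]bool
var backup map[string]bool

func pushClone() {
	backup = make(map[string]bool, len(suite))
	for k, v := range suite {
		backup[k] = v
	}
}

func popClone() { suite = backup }

func main() {
	suite = map[string]bool{"It runs": true}
	pushClone()
	suite["It is temporary"] = true // mutate the live suite
	popClone()                      // roll back to the snapshot
	fmt.Println(len(suite))         // 1
}
```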
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/group.go b/vendor/github.com/onsi/ginkgo/v2/internal/group.go
new file mode 100644
index 000000000..02c9fe4fc
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/group.go
@@ -0,0 +1,383 @@
+package internal
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type runOncePair struct {
+ // nodeID should only run once...
+ nodeID uint
+ nodeType types.NodeType
+ // ...for specs in a hierarchy that includes this context
+ containerID uint
+}
+
+func (pair runOncePair) isZero() bool {
+ return pair.nodeID == 0
+}
+
+func runOncePairForNode(node Node, containerID uint) runOncePair {
+ return runOncePair{
+ nodeID: node.ID,
+ nodeType: node.NodeType,
+ containerID: containerID,
+ }
+}
+
+type runOncePairs []runOncePair
+
+func runOncePairsForSpec(spec Spec) runOncePairs {
+ pairs := runOncePairs{}
+
+ containers := spec.Nodes.WithType(types.NodeTypeContainer)
+ for _, node := range spec.Nodes {
+ if node.NodeType.Is(types.NodeTypeBeforeAll | types.NodeTypeAfterAll) {
+ pairs = append(pairs, runOncePairForNode(node, containers.FirstWithNestingLevel(node.NestingLevel-1).ID))
+ } else if node.NodeType.Is(types.NodeTypeBeforeEach|types.NodeTypeJustBeforeEach|types.NodeTypeAfterEach|types.NodeTypeJustAfterEach) && node.MarkedOncePerOrdered {
+ passedIntoAnOrderedContainer := false
+ firstOrderedContainerDeeperThanNode := containers.FirstSatisfying(func(container Node) bool {
+ passedIntoAnOrderedContainer = passedIntoAnOrderedContainer || container.MarkedOrdered
+ return container.NestingLevel >= node.NestingLevel && passedIntoAnOrderedContainer
+ })
+ if firstOrderedContainerDeeperThanNode.IsZero() {
+ continue
+ }
+ pairs = append(pairs, runOncePairForNode(node, firstOrderedContainerDeeperThanNode.ID))
+ }
+ }
+
+ return pairs
+}
+
+func (pairs runOncePairs) runOncePairFor(nodeID uint) runOncePair {
+ for i := range pairs {
+ if pairs[i].nodeID == nodeID {
+ return pairs[i]
+ }
+ }
+ return runOncePair{}
+}
+
+func (pairs runOncePairs) hasRunOncePair(pair runOncePair) bool {
+ for i := range pairs {
+ if pairs[i] == pair {
+ return true
+ }
+ }
+ return false
+}
+
+func (pairs runOncePairs) withType(nodeTypes types.NodeType) runOncePairs {
+ count := 0
+ for i := range pairs {
+ if pairs[i].nodeType.Is(nodeTypes) {
+ count++
+ }
+ }
+
+ out, j := make(runOncePairs, count), 0
+ for i := range pairs {
+ if pairs[i].nodeType.Is(nodeTypes) {
+ out[j] = pairs[i]
+ j++
+ }
+ }
+ return out
+}
+
+type group struct {
+ suite *Suite
+ specs Specs
+ runOncePairs map[uint]runOncePairs
+ runOnceTracker map[runOncePair]types.SpecState
+
+ succeeded bool
+ failedInARunOnceBefore bool
+ continueOnFailure bool
+}
+
+func newGroup(suite *Suite) *group {
+ return &group{
+ suite: suite,
+ runOncePairs: map[uint]runOncePairs{},
+ runOnceTracker: map[runOncePair]types.SpecState{},
+ succeeded: true,
+ failedInARunOnceBefore: false,
+ continueOnFailure: false,
+ }
+}
+
+func (g *group) initialReportForSpec(spec Spec) types.SpecReport {
+ return types.SpecReport{
+ ContainerHierarchyTexts: spec.Nodes.WithType(types.NodeTypeContainer).Texts(),
+ ContainerHierarchyLocations: spec.Nodes.WithType(types.NodeTypeContainer).CodeLocations(),
+ ContainerHierarchyLabels: spec.Nodes.WithType(types.NodeTypeContainer).Labels(),
+ LeafNodeLocation: spec.FirstNodeWithType(types.NodeTypeIt).CodeLocation,
+ LeafNodeType: types.NodeTypeIt,
+ LeafNodeText: spec.FirstNodeWithType(types.NodeTypeIt).Text,
+ LeafNodeLabels: []string(spec.FirstNodeWithType(types.NodeTypeIt).Labels),
+ ParallelProcess: g.suite.config.ParallelProcess,
+ RunningInParallel: g.suite.isRunningInParallel(),
+ IsSerial: spec.Nodes.HasNodeMarkedSerial(),
+ IsInOrderedContainer: !spec.Nodes.FirstNodeMarkedOrdered().IsZero(),
+ MaxFlakeAttempts: spec.Nodes.GetMaxFlakeAttempts(),
+ MaxMustPassRepeatedly: spec.Nodes.GetMaxMustPassRepeatedly(),
+ }
+}
+
+func (g *group) evaluateSkipStatus(spec Spec) (types.SpecState, types.Failure) {
+ if spec.Nodes.HasNodeMarkedPending() {
+ return types.SpecStatePending, types.Failure{}
+ }
+ if spec.Skip {
+ return types.SpecStateSkipped, types.Failure{}
+ }
+ if g.suite.interruptHandler.Status().Interrupted() || g.suite.skipAll {
+ return types.SpecStateSkipped, types.Failure{}
+ }
+ if !g.suite.deadline.IsZero() && g.suite.deadline.Before(time.Now()) {
+ return types.SpecStateSkipped, types.Failure{}
+ }
+ if !g.succeeded && !g.continueOnFailure {
+ return types.SpecStateSkipped, g.suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt),
+ "Spec skipped because an earlier spec in an ordered container failed")
+ }
+ if g.failedInARunOnceBefore && g.continueOnFailure {
+ return types.SpecStateSkipped, g.suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt),
+ "Spec skipped because a BeforeAll node failed")
+ }
+ beforeOncePairs := g.runOncePairs[spec.SubjectID()].withType(types.NodeTypeBeforeAll | types.NodeTypeBeforeEach | types.NodeTypeJustBeforeEach)
+ for _, pair := range beforeOncePairs {
+ if g.runOnceTracker[pair].Is(types.SpecStateSkipped) {
+ return types.SpecStateSkipped, g.suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt),
+ fmt.Sprintf("Spec skipped because Skip() was called in %s", pair.nodeType))
+ }
+ }
+ if g.suite.config.DryRun {
+ return types.SpecStatePassed, types.Failure{}
+ }
+ return g.suite.currentSpecReport.State, g.suite.currentSpecReport.Failure
+}
+
+func (g *group) isLastSpecWithPair(specID uint, pair runOncePair) bool {
+ lastSpecID := uint(0)
+ for idx := range g.specs {
+ if g.specs[idx].Skip {
+ continue
+ }
+ sID := g.specs[idx].SubjectID()
+ if g.runOncePairs[sID].hasRunOncePair(pair) {
+ lastSpecID = sID
+ }
+ }
+ return lastSpecID == specID
+}
+
+func (g *group) attemptSpec(isFinalAttempt bool, spec Spec) bool {
+ failedInARunOnceBefore := false
+ pairs := g.runOncePairs[spec.SubjectID()]
+
+ nodes := spec.Nodes.WithType(types.NodeTypeBeforeAll)
+ nodes = append(nodes, spec.Nodes.WithType(types.NodeTypeBeforeEach)...).SortedByAscendingNestingLevel()
+ nodes = append(nodes, spec.Nodes.WithType(types.NodeTypeJustBeforeEach).SortedByAscendingNestingLevel()...)
+ nodes = append(nodes, spec.Nodes.FirstNodeWithType(types.NodeTypeIt))
+ terminatingNode, terminatingPair := Node{}, runOncePair{}
+
+ deadline := time.Time{}
+ if spec.SpecTimeout() > 0 {
+ deadline = time.Now().Add(spec.SpecTimeout())
+ }
+
+ for _, node := range nodes {
+ oncePair := pairs.runOncePairFor(node.ID)
+ if !oncePair.isZero() && g.runOnceTracker[oncePair].Is(types.SpecStatePassed) {
+ continue
+ }
+ g.suite.currentSpecReport.State, g.suite.currentSpecReport.Failure = g.suite.runNode(node, deadline, spec.Nodes.BestTextFor(node))
+ g.suite.currentSpecReport.RunTime = time.Since(g.suite.currentSpecReport.StartTime)
+ if !oncePair.isZero() {
+ g.runOnceTracker[oncePair] = g.suite.currentSpecReport.State
+ }
+ if g.suite.currentSpecReport.State != types.SpecStatePassed {
+ terminatingNode, terminatingPair = node, oncePair
+ failedInARunOnceBefore = !terminatingPair.isZero()
+ break
+ }
+ }
+
+ afterNodeWasRun := map[uint]bool{}
+ includeDeferCleanups := false
+ for {
+ nodes := spec.Nodes.WithType(types.NodeTypeAfterEach)
+ nodes = append(nodes, spec.Nodes.WithType(types.NodeTypeAfterAll)...).SortedByDescendingNestingLevel()
+ nodes = append(spec.Nodes.WithType(types.NodeTypeJustAfterEach).SortedByDescendingNestingLevel(), nodes...)
+ if !terminatingNode.IsZero() {
+ nodes = nodes.WithinNestingLevel(terminatingNode.NestingLevel)
+ }
+ if includeDeferCleanups {
+ nodes = append(nodes, g.suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterEach).Reverse()...)
+ nodes = append(nodes, g.suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterAll).Reverse()...)
+ }
+ nodes = nodes.Filter(func(node Node) bool {
+ if afterNodeWasRun[node.ID] {
+ //this node has already been run on this attempt, don't rerun it
+ return false
+ }
+ var pair runOncePair
+ switch node.NodeType {
+ case types.NodeTypeCleanupAfterEach, types.NodeTypeCleanupAfterAll:
+ // check if we were generated in an AfterNode that has already run
+ if afterNodeWasRun[node.NodeIDWhereCleanupWasGenerated] {
+ return true // we were, so we should definitely run this cleanup now
+ }
+ // looks like this cleanup node was generated by a before node or an It.
+ // the run-once status of a cleanup node is governed by the run-once status of its generator
+ pair = pairs.runOncePairFor(node.NodeIDWhereCleanupWasGenerated)
+ default:
+ pair = pairs.runOncePairFor(node.ID)
+ }
+ if pair.isZero() {
+ // this node is not governed by any run-once policy, we should run it
+ return true
+ }
+ // it's our last chance to run if we're the last spec for our oncePair
+ isLastSpecWithPair := g.isLastSpecWithPair(spec.SubjectID(), pair)
+
+ switch g.suite.currentSpecReport.State {
+ case types.SpecStatePassed: //this attempt is passing...
+ return isLastSpecWithPair //...we should run once if this is our last chance
+ case types.SpecStateSkipped: //the spec was skipped by the user...
+ if isLastSpecWithPair {
+ return true //...we're the last spec, so we should run the AfterNode
+ }
+ if !terminatingPair.isZero() && terminatingNode.NestingLevel == node.NestingLevel {
+ return true //...or, a run-once node at our nesting level was skipped which means this is our last chance to run
+ }
+ case types.SpecStateFailed, types.SpecStatePanicked, types.SpecStateTimedout: // the spec has failed...
+ if isFinalAttempt {
+ if g.continueOnFailure {
+ return isLastSpecWithPair || failedInARunOnceBefore //...we're configured to continue on failures - so we should only run if we're the last spec for this pair or if we failed in a runOnceBefore (which means we _are_ the last spec to run)
+ } else {
+ return true //...this was the last attempt and continueOnFailure is false therefore we are the last spec to run and so the AfterNode should run
+ }
+ }
+ if !terminatingPair.isZero() { // ...and it failed in a run-once node, which will be running again
+ if node.NodeType.Is(types.NodeTypeCleanupAfterEach | types.NodeTypeCleanupAfterAll) {
+ return terminatingNode.ID == node.NodeIDWhereCleanupWasGenerated // we should run this node if we're a clean-up generated by it
+ } else {
+ return terminatingNode.NestingLevel == node.NestingLevel // ...or if we're at the same nesting level
+ }
+ }
+ case types.SpecStateInterrupted, types.SpecStateAborted: // ...we've been interrupted and/or aborted
+ return true //...that means the test run is over and we should clean up the stack. Run the AfterNode
+ }
+ return false
+ })
+
+ if len(nodes) == 0 && includeDeferCleanups {
+ break
+ }
+
+ for _, node := range nodes {
+ afterNodeWasRun[node.ID] = true
+ state, failure := g.suite.runNode(node, deadline, spec.Nodes.BestTextFor(node))
+ g.suite.currentSpecReport.RunTime = time.Since(g.suite.currentSpecReport.StartTime)
+ if g.suite.currentSpecReport.State == types.SpecStatePassed || state == types.SpecStateAborted {
+ g.suite.currentSpecReport.State = state
+ g.suite.currentSpecReport.Failure = failure
+ } else if state.Is(types.SpecStateFailureStates) {
+ g.suite.currentSpecReport.AdditionalFailures = append(g.suite.currentSpecReport.AdditionalFailures, types.AdditionalFailure{State: state, Failure: failure})
+ }
+ }
+ includeDeferCleanups = true
+ }
+
+ return failedInARunOnceBefore
+}
+
+func (g *group) run(specs Specs) {
+ g.specs = specs
+ g.continueOnFailure = specs[0].Nodes.FirstNodeMarkedOrdered().MarkedContinueOnFailure
+ for _, spec := range g.specs {
+ g.runOncePairs[spec.SubjectID()] = runOncePairsForSpec(spec)
+ }
+
+ for _, spec := range g.specs {
+ g.suite.selectiveLock.Lock()
+ g.suite.currentSpecReport = g.initialReportForSpec(spec)
+ g.suite.selectiveLock.Unlock()
+
+ g.suite.currentSpecReport.State, g.suite.currentSpecReport.Failure = g.evaluateSkipStatus(spec)
+ g.suite.reporter.WillRun(g.suite.currentSpecReport)
+ g.suite.reportEach(spec, types.NodeTypeReportBeforeEach)
+
+ skip := g.suite.config.DryRun || g.suite.currentSpecReport.State.Is(types.SpecStateFailureStates|types.SpecStateSkipped|types.SpecStatePending)
+
+ g.suite.currentSpecReport.StartTime = time.Now()
+ failedInARunOnceBefore := false
+ if !skip {
+ var maxAttempts = 1
+
+ if g.suite.config.MustPassRepeatedly > 0 {
+ maxAttempts = g.suite.config.MustPassRepeatedly
+ g.suite.currentSpecReport.MaxMustPassRepeatedly = maxAttempts
+ } else if g.suite.currentSpecReport.MaxMustPassRepeatedly > 0 {
+ maxAttempts = max(1, spec.MustPassRepeatedly())
+ } else if g.suite.config.FlakeAttempts > 0 {
+ maxAttempts = g.suite.config.FlakeAttempts
+ g.suite.currentSpecReport.MaxFlakeAttempts = maxAttempts
+ } else if g.suite.currentSpecReport.MaxFlakeAttempts > 0 {
+ maxAttempts = max(1, spec.FlakeAttempts())
+ }
+
+ for attempt := 0; attempt < maxAttempts; attempt++ {
+ g.suite.currentSpecReport.NumAttempts = attempt + 1
+ g.suite.writer.Truncate()
+ g.suite.outputInterceptor.StartInterceptingOutput()
+ if attempt > 0 {
+ if g.suite.currentSpecReport.MaxMustPassRepeatedly > 0 {
+ g.suite.handleSpecEvent(types.SpecEvent{SpecEventType: types.SpecEventSpecRepeat, Attempt: attempt})
+ }
+ if g.suite.currentSpecReport.MaxFlakeAttempts > 0 {
+ g.suite.handleSpecEvent(types.SpecEvent{SpecEventType: types.SpecEventSpecRetry, Attempt: attempt})
+ }
+ }
+
+ failedInARunOnceBefore = g.attemptSpec(attempt == maxAttempts-1, spec)
+
+ g.suite.currentSpecReport.EndTime = time.Now()
+ g.suite.currentSpecReport.RunTime = g.suite.currentSpecReport.EndTime.Sub(g.suite.currentSpecReport.StartTime)
+ g.suite.currentSpecReport.CapturedGinkgoWriterOutput += string(g.suite.writer.Bytes())
+ g.suite.currentSpecReport.CapturedStdOutErr += g.suite.outputInterceptor.StopInterceptingAndReturnOutput()
+
+ if g.suite.currentSpecReport.MaxMustPassRepeatedly > 0 {
+ if g.suite.currentSpecReport.State.Is(types.SpecStateFailureStates | types.SpecStateSkipped) {
+ break
+ }
+ }
+ if g.suite.currentSpecReport.MaxFlakeAttempts > 0 {
+ if g.suite.currentSpecReport.State.Is(types.SpecStatePassed | types.SpecStateSkipped | types.SpecStateAborted | types.SpecStateInterrupted) {
+ break
+ } else if attempt < maxAttempts-1 {
+ af := types.AdditionalFailure{State: g.suite.currentSpecReport.State, Failure: g.suite.currentSpecReport.Failure}
+ af.Failure.Message = fmt.Sprintf("Failure recorded during attempt %d:\n%s", attempt+1, af.Failure.Message)
+ g.suite.currentSpecReport.AdditionalFailures = append(g.suite.currentSpecReport.AdditionalFailures, af)
+ }
+ }
+ }
+ }
+
+ g.suite.reportEach(spec, types.NodeTypeReportAfterEach)
+ g.suite.processCurrentSpecReport()
+ if g.suite.currentSpecReport.State.Is(types.SpecStateFailureStates) {
+ g.succeeded = false
+ g.failedInARunOnceBefore = g.failedInARunOnceBefore || failedInARunOnceBefore
+ }
+ g.suite.selectiveLock.Lock()
+ g.suite.currentSpecReport = types.SpecReport{}
+ g.suite.selectiveLock.Unlock()
+ }
+}
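The attempt loop in `run` gives `FlakeAttempts` and `MustPassRepeatedly` opposite exit conditions: retries stop at the first pass, repeats stop at the first failure. A sketch of the two loop shapes, where the `run` callback stands in for attempting the spec:

```go
package main

import "fmt"

// runWithFlakeAttempts retries a failing spec up to maxAttempts times and
// stops at the first pass, like the FlakeAttempts branch above.
func runWithFlakeAttempts(maxAttempts int, run func() bool) (passed bool, attempts int) {
	for attempt := 0; attempt < maxAttempts; attempt++ {
		attempts = attempt + 1
		if run() {
			return true, attempts // first pass wins
		}
	}
	return false, attempts
}

// runMustPassRepeatedly inverts the condition and stops at the first failure,
// like the MustPassRepeatedly branch.
func runMustPassRepeatedly(repeats int, run func() bool) (passed bool, attempts int) {
	for attempt := 0; attempt < repeats; attempt++ {
		attempts = attempt + 1
		if !run() {
			return false, attempts // first failure loses
		}
	}
	return true, attempts
}

func main() {
	calls := 0
	flaky := func() bool { calls++; return calls >= 3 } // fails twice, then passes

	ok, n := runWithFlakeAttempts(4, flaky)
	fmt.Println(ok, n) // true 3

	calls = 0
	ok, n = runMustPassRepeatedly(4, flaky)
	fmt.Println(ok, n) // false 1
}
```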
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go
new file mode 100644
index 000000000..8ed86111f
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go
@@ -0,0 +1,177 @@
+package interrupt_handler
+
+import (
+ "os"
+ "os/signal"
+ "sync"
+ "syscall"
+ "time"
+
+ "github.com/onsi/ginkgo/v2/internal/parallel_support"
+)
+
+var ABORT_POLLING_INTERVAL = 500 * time.Millisecond
+
+type InterruptCause uint
+
+const (
+ InterruptCauseInvalid InterruptCause = iota
+ InterruptCauseSignal
+ InterruptCauseAbortByOtherProcess
+)
+
+type InterruptLevel uint
+
+const (
+ InterruptLevelUninterrupted InterruptLevel = iota
+ InterruptLevelCleanupAndReport
+ InterruptLevelReportOnly
+ InterruptLevelBailOut
+)
+
+func (ic InterruptCause) String() string {
+ switch ic {
+ case InterruptCauseSignal:
+ return "Interrupted by User"
+ case InterruptCauseAbortByOtherProcess:
+ return "Interrupted by Other Ginkgo Process"
+ }
+ return "INVALID_INTERRUPT_CAUSE"
+}
+
+type InterruptStatus struct {
+ Channel chan interface{}
+ Level InterruptLevel
+ Cause InterruptCause
+}
+
+func (s InterruptStatus) Interrupted() bool {
+ return s.Level != InterruptLevelUninterrupted
+}
+
+func (s InterruptStatus) Message() string {
+ return s.Cause.String()
+}
+
+func (s InterruptStatus) ShouldIncludeProgressReport() bool {
+ return s.Cause != InterruptCauseAbortByOtherProcess
+}
+
+type InterruptHandlerInterface interface {
+ Status() InterruptStatus
+}
+
+type InterruptHandler struct {
+ c chan interface{}
+ lock *sync.Mutex
+ level InterruptLevel
+ cause InterruptCause
+ client parallel_support.Client
+ stop chan interface{}
+ signals []os.Signal
+ requestAbortCheck chan interface{}
+}
+
+func NewInterruptHandler(client parallel_support.Client, signals ...os.Signal) *InterruptHandler {
+ if len(signals) == 0 {
+ signals = []os.Signal{os.Interrupt, syscall.SIGTERM}
+ }
+ handler := &InterruptHandler{
+ c: make(chan interface{}),
+ lock: &sync.Mutex{},
+ stop: make(chan interface{}),
+ requestAbortCheck: make(chan interface{}),
+ client: client,
+ signals: signals,
+ }
+ handler.registerForInterrupts()
+ return handler
+}
+
+func (handler *InterruptHandler) Stop() {
+ close(handler.stop)
+}
+
+func (handler *InterruptHandler) registerForInterrupts() {
+ // os signal handling
+ signalChannel := make(chan os.Signal, 1)
+ signal.Notify(signalChannel, handler.signals...)
+
+ // cross-process abort handling
+ var abortChannel chan interface{}
+ if handler.client != nil {
+ abortChannel = make(chan interface{})
+ go func() {
+ pollTicker := time.NewTicker(ABORT_POLLING_INTERVAL)
+ for {
+ select {
+ case <-pollTicker.C:
+ if handler.client.ShouldAbort() {
+ close(abortChannel)
+ pollTicker.Stop()
+ return
+ }
+ case <-handler.requestAbortCheck:
+ if handler.client.ShouldAbort() {
+ close(abortChannel)
+ pollTicker.Stop()
+ return
+ }
+ case <-handler.stop:
+ pollTicker.Stop()
+ return
+ }
+ }
+ }()
+ }
+
+ go func(abortChannel chan interface{}) {
+ var interruptCause InterruptCause
+ for {
+ select {
+ case <-signalChannel:
+ interruptCause = InterruptCauseSignal
+ case <-abortChannel:
+ interruptCause = InterruptCauseAbortByOtherProcess
+ case <-handler.stop:
+ signal.Stop(signalChannel)
+ return
+ }
+ abortChannel = nil
+
+ handler.lock.Lock()
+ oldLevel := handler.level
+ handler.cause = interruptCause
+ if handler.level == InterruptLevelUninterrupted {
+ handler.level = InterruptLevelCleanupAndReport
+ } else if handler.level == InterruptLevelCleanupAndReport {
+ handler.level = InterruptLevelReportOnly
+ } else if handler.level == InterruptLevelReportOnly {
+ handler.level = InterruptLevelBailOut
+ }
+ if handler.level != oldLevel {
+ close(handler.c)
+ handler.c = make(chan interface{})
+ }
+ handler.lock.Unlock()
+ }
+ }(abortChannel)
+}
+
+func (handler *InterruptHandler) Status() InterruptStatus {
+ handler.lock.Lock()
+ status := InterruptStatus{
+ Level: handler.level,
+ Channel: handler.c,
+ Cause: handler.cause,
+ }
+ handler.lock.Unlock()
+
+ if handler.client != nil && handler.client.ShouldAbort() && !status.Interrupted() {
+ close(handler.requestAbortCheck)
+ <-status.Channel
+ return handler.Status()
+ }
+
+ return status
+}
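When the interrupt level escalates, the handler closes its current channel and immediately replaces it with a fresh one: every goroutine blocked on the old channel wakes, while later `Status()` calls receive the new channel for the next escalation. A minimal sketch of that close-and-replace broadcast:

```go
package main

import (
	"fmt"
	"sync"
)

// broadcaster restates the handler's trick: closing a channel wakes every
// waiter at once, and swapping in a fresh channel arms the next broadcast.
type broadcaster struct {
	mu sync.Mutex
	c  chan struct{}
}

func newBroadcaster() *broadcaster {
	return &broadcaster{c: make(chan struct{})}
}

func (b *broadcaster) Channel() chan struct{} {
	b.mu.Lock()
	defer b.mu.Unlock()
	return b.c
}

func (b *broadcaster) Broadcast() {
	b.mu.Lock()
	defer b.mu.Unlock()
	close(b.c)                // wake everyone waiting on the old channel
	b.c = make(chan struct{}) // later waiters get a fresh channel
}

func main() {
	b := newBroadcaster()
	done := make(chan struct{})
	go func() {
		<-b.Channel() // blocks until the first Broadcast
		fmt.Println("interrupted")
		close(done)
	}()
	b.Broadcast()
	<-done
}
```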
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_unix.go b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_unix.go
new file mode 100644
index 000000000..bf0de496d
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_unix.go
@@ -0,0 +1,15 @@
+//go:build freebsd || openbsd || netbsd || dragonfly || darwin || linux || solaris
+// +build freebsd openbsd netbsd dragonfly darwin linux solaris
+
+package interrupt_handler
+
+import (
+ "os"
+ "os/signal"
+ "syscall"
+)
+
+func SwallowSigQuit() {
+ c := make(chan os.Signal, 1024)
+ signal.Notify(c, syscall.SIGQUIT)
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_windows.go b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_windows.go
new file mode 100644
index 000000000..fcf8da833
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_windows.go
@@ -0,0 +1,8 @@
+//go:build windows
+// +build windows
+
+package interrupt_handler
+
+func SwallowSigQuit() {
+ // no-op
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/node.go b/vendor/github.com/onsi/ginkgo/v2/internal/node.go
new file mode 100644
index 000000000..6a15f19ae
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/node.go
@@ -0,0 +1,935 @@
+package internal
+
+import (
+ "context"
+ "fmt"
+ "reflect"
+ "sort"
+ "sync"
+ "time"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+var _global_node_id_counter = uint(0)
+var _global_id_mutex = &sync.Mutex{}
+
+func UniqueNodeID() uint {
+ // There's a race in the internal integration tests if we don't make
+ // accessing _global_node_id_counter safe across goroutines.
+ _global_id_mutex.Lock()
+ defer _global_id_mutex.Unlock()
+ _global_node_id_counter += 1
+ return _global_node_id_counter
+}
+
+type Node struct {
+ ID uint
+ NodeType types.NodeType
+
+ Text string
+ Body func(SpecContext)
+ CodeLocation types.CodeLocation
+ NestingLevel int
+ HasContext bool
+
+ SynchronizedBeforeSuiteProc1Body func(SpecContext) []byte
+ SynchronizedBeforeSuiteProc1BodyHasContext bool
+ SynchronizedBeforeSuiteAllProcsBody func(SpecContext, []byte)
+ SynchronizedBeforeSuiteAllProcsBodyHasContext bool
+
+ SynchronizedAfterSuiteAllProcsBody func(SpecContext)
+ SynchronizedAfterSuiteAllProcsBodyHasContext bool
+ SynchronizedAfterSuiteProc1Body func(SpecContext)
+ SynchronizedAfterSuiteProc1BodyHasContext bool
+
+ ReportEachBody func(SpecContext, types.SpecReport)
+ ReportSuiteBody func(SpecContext, types.Report)
+
+ MarkedFocus bool
+ MarkedPending bool
+ MarkedSerial bool
+ MarkedOrdered bool
+ MarkedContinueOnFailure bool
+ MarkedOncePerOrdered bool
+ FlakeAttempts int
+ MustPassRepeatedly int
+ Labels Labels
+ PollProgressAfter time.Duration
+ PollProgressInterval time.Duration
+ NodeTimeout time.Duration
+ SpecTimeout time.Duration
+ GracePeriod time.Duration
+
+ NodeIDWhereCleanupWasGenerated uint
+}
+
+// Decoration Types
+type focusType bool
+type pendingType bool
+type serialType bool
+type orderedType bool
+type continueOnFailureType bool
+type honorsOrderedType bool
+type suppressProgressReporting bool
+
+const Focus = focusType(true)
+const Pending = pendingType(true)
+const Serial = serialType(true)
+const Ordered = orderedType(true)
+const ContinueOnFailure = continueOnFailureType(true)
+const OncePerOrdered = honorsOrderedType(true)
+const SuppressProgressReporting = suppressProgressReporting(true)
+
+type FlakeAttempts uint
+type MustPassRepeatedly uint
+type Offset uint
+type Done chan<- interface{} // Deprecated Done Channel for asynchronous testing
+type Labels []string
+type PollProgressInterval time.Duration
+type PollProgressAfter time.Duration
+type NodeTimeout time.Duration
+type SpecTimeout time.Duration
+type GracePeriod time.Duration
+
+func (l Labels) MatchesLabelFilter(query string) bool {
+ return types.MustParseLabelFilter(query)(l)
+}
+
+func UnionOfLabels(labels ...Labels) Labels {
+ out := Labels{}
+ seen := map[string]bool{}
+ for _, labelSet := range labels {
+ for _, label := range labelSet {
+ if !seen[label] {
+ seen[label] = true
+ out = append(out, label)
+ }
+ }
+ }
+ return out
+}
+
+func PartitionDecorations(args ...interface{}) ([]interface{}, []interface{}) {
+ decorations := []interface{}{}
+ remainingArgs := []interface{}{}
+ for _, arg := range args {
+ if isDecoration(arg) {
+ decorations = append(decorations, arg)
+ } else {
+ remainingArgs = append(remainingArgs, arg)
+ }
+ }
+ return decorations, remainingArgs
+}
+
+func isDecoration(arg interface{}) bool {
+ switch t := reflect.TypeOf(arg); {
+ case t == nil:
+ return false
+ case t == reflect.TypeOf(Offset(0)):
+ return true
+ case t == reflect.TypeOf(types.CodeLocation{}):
+ return true
+ case t == reflect.TypeOf(Focus):
+ return true
+ case t == reflect.TypeOf(Pending):
+ return true
+ case t == reflect.TypeOf(Serial):
+ return true
+ case t == reflect.TypeOf(Ordered):
+ return true
+ case t == reflect.TypeOf(ContinueOnFailure):
+ return true
+ case t == reflect.TypeOf(OncePerOrdered):
+ return true
+ case t == reflect.TypeOf(SuppressProgressReporting):
+ return true
+ case t == reflect.TypeOf(FlakeAttempts(0)):
+ return true
+ case t == reflect.TypeOf(MustPassRepeatedly(0)):
+ return true
+ case t == reflect.TypeOf(Labels{}):
+ return true
+ case t == reflect.TypeOf(PollProgressInterval(0)):
+ return true
+ case t == reflect.TypeOf(PollProgressAfter(0)):
+ return true
+ case t == reflect.TypeOf(NodeTimeout(0)):
+ return true
+ case t == reflect.TypeOf(SpecTimeout(0)):
+ return true
+ case t == reflect.TypeOf(GracePeriod(0)):
+ return true
+ case t.Kind() == reflect.Slice && isSliceOfDecorations(arg):
+ return true
+ default:
+ return false
+ }
+}
+
+func isSliceOfDecorations(slice interface{}) bool {
+ vSlice := reflect.ValueOf(slice)
+ if vSlice.Len() == 0 {
+ return false
+ }
+ for i := 0; i < vSlice.Len(); i++ {
+ if !isDecoration(vSlice.Index(i).Interface()) {
+ return false
+ }
+ }
+ return true
+}
+
+var contextType = reflect.TypeOf(new(context.Context)).Elem()
+var specContextType = reflect.TypeOf(new(SpecContext)).Elem()
+
+func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeType, text string, args ...interface{}) (Node, []error) {
+ baseOffset := 2
+ node := Node{
+ ID: UniqueNodeID(),
+ NodeType: nodeType,
+ Text: text,
+ Labels: Labels{},
+ CodeLocation: types.NewCodeLocation(baseOffset),
+ NestingLevel: -1,
+ PollProgressAfter: -1,
+ PollProgressInterval: -1,
+ GracePeriod: -1,
+ }
+
+ errors := []error{}
+ appendError := func(err error) {
+ if err != nil {
+ errors = append(errors, err)
+ }
+ }
+
+ args = unrollInterfaceSlice(args)
+
+ remainingArgs := []interface{}{}
+ // First get the CodeLocation up-to-date
+ for _, arg := range args {
+ switch v := arg.(type) {
+ case Offset:
+ node.CodeLocation = types.NewCodeLocation(baseOffset + int(v))
+ case types.CodeLocation:
+ node.CodeLocation = v
+ default:
+ remainingArgs = append(remainingArgs, arg)
+ }
+ }
+
+ labelsSeen := map[string]bool{}
+ trackedFunctionError := false
+ args = remainingArgs
+ remainingArgs = []interface{}{}
+ // now process the rest of the args
+ for _, arg := range args {
+ switch t := reflect.TypeOf(arg); {
+ case t == reflect.TypeOf(float64(0)):
+ break // ignore deprecated timeouts
+ case t == reflect.TypeOf(Focus):
+ node.MarkedFocus = bool(arg.(focusType))
+ if !nodeType.Is(types.NodeTypesForContainerAndIt) {
+ appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Focus"))
+ }
+ case t == reflect.TypeOf(Pending):
+ node.MarkedPending = bool(arg.(pendingType))
+ if !nodeType.Is(types.NodeTypesForContainerAndIt) {
+ appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Pending"))
+ }
+ case t == reflect.TypeOf(Serial):
+ node.MarkedSerial = bool(arg.(serialType))
+ if !nodeType.Is(types.NodeTypesForContainerAndIt) {
+ appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Serial"))
+ }
+ case t == reflect.TypeOf(Ordered):
+ node.MarkedOrdered = bool(arg.(orderedType))
+ if !nodeType.Is(types.NodeTypeContainer) {
+ appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Ordered"))
+ }
+ case t == reflect.TypeOf(ContinueOnFailure):
+ node.MarkedContinueOnFailure = bool(arg.(continueOnFailureType))
+ if !nodeType.Is(types.NodeTypeContainer) {
+ appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "ContinueOnFailure"))
+ }
+ case t == reflect.TypeOf(OncePerOrdered):
+ node.MarkedOncePerOrdered = bool(arg.(honorsOrderedType))
+ if !nodeType.Is(types.NodeTypeBeforeEach | types.NodeTypeJustBeforeEach | types.NodeTypeAfterEach | types.NodeTypeJustAfterEach) {
+ appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "OncePerOrdered"))
+ }
+ case t == reflect.TypeOf(SuppressProgressReporting):
+ deprecationTracker.TrackDeprecation(types.Deprecations.SuppressProgressReporting())
+ case t == reflect.TypeOf(FlakeAttempts(0)):
+ node.FlakeAttempts = int(arg.(FlakeAttempts))
+ if !nodeType.Is(types.NodeTypesForContainerAndIt) {
+ appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "FlakeAttempts"))
+ }
+ case t == reflect.TypeOf(MustPassRepeatedly(0)):
+ node.MustPassRepeatedly = int(arg.(MustPassRepeatedly))
+ if !nodeType.Is(types.NodeTypesForContainerAndIt) {
+ appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "MustPassRepeatedly"))
+ }
+ case t == reflect.TypeOf(PollProgressAfter(0)):
+ node.PollProgressAfter = time.Duration(arg.(PollProgressAfter))
+ if nodeType.Is(types.NodeTypeContainer) {
+ appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "PollProgressAfter"))
+ }
+ case t == reflect.TypeOf(PollProgressInterval(0)):
+ node.PollProgressInterval = time.Duration(arg.(PollProgressInterval))
+ if nodeType.Is(types.NodeTypeContainer) {
+ appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "PollProgressInterval"))
+ }
+ case t == reflect.TypeOf(NodeTimeout(0)):
+ node.NodeTimeout = time.Duration(arg.(NodeTimeout))
+ if nodeType.Is(types.NodeTypeContainer) {
+ appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "NodeTimeout"))
+ }
+ case t == reflect.TypeOf(SpecTimeout(0)):
+ node.SpecTimeout = time.Duration(arg.(SpecTimeout))
+ if !nodeType.Is(types.NodeTypeIt) {
+ appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "SpecTimeout"))
+ }
+ case t == reflect.TypeOf(GracePeriod(0)):
+ node.GracePeriod = time.Duration(arg.(GracePeriod))
+ if nodeType.Is(types.NodeTypeContainer) {
+ appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "GracePeriod"))
+ }
+ case t == reflect.TypeOf(Labels{}):
+ if !nodeType.Is(types.NodeTypesForContainerAndIt) {
+ appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Label"))
+ }
+ for _, label := range arg.(Labels) {
+ if !labelsSeen[label] {
+ labelsSeen[label] = true
+ label, err := types.ValidateAndCleanupLabel(label, node.CodeLocation)
+ node.Labels = append(node.Labels, label)
+ appendError(err)
+ }
+ }
+ case t.Kind() == reflect.Func:
+ if nodeType.Is(types.NodeTypeContainer) {
+ if node.Body != nil {
+ appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType))
+ trackedFunctionError = true
+ break
+ }
+ if t.NumOut() > 0 || t.NumIn() > 0 {
+ appendError(types.GinkgoErrors.InvalidBodyTypeForContainer(t, node.CodeLocation, nodeType))
+ trackedFunctionError = true
+ break
+ }
+ body := arg.(func())
+ node.Body = func(SpecContext) { body() }
+ } else if nodeType.Is(types.NodeTypeReportBeforeEach | types.NodeTypeReportAfterEach) {
+ if node.ReportEachBody == nil {
+ if fn, ok := arg.(func(types.SpecReport)); ok {
+ node.ReportEachBody = func(_ SpecContext, r types.SpecReport) { fn(r) }
+ } else {
+ node.ReportEachBody = arg.(func(SpecContext, types.SpecReport))
+ node.HasContext = true
+ }
+ } else {
+ appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType))
+ trackedFunctionError = true
+ break
+ }
+ } else if nodeType.Is(types.NodeTypeReportBeforeSuite | types.NodeTypeReportAfterSuite) {
+ if node.ReportSuiteBody == nil {
+ if fn, ok := arg.(func(types.Report)); ok {
+ node.ReportSuiteBody = func(_ SpecContext, r types.Report) { fn(r) }
+ } else {
+ node.ReportSuiteBody = arg.(func(SpecContext, types.Report))
+ node.HasContext = true
+ }
+ } else {
+ appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType))
+ trackedFunctionError = true
+ break
+ }
+ } else if nodeType.Is(types.NodeTypeSynchronizedBeforeSuite) {
+ if node.SynchronizedBeforeSuiteProc1Body != nil && node.SynchronizedBeforeSuiteAllProcsBody != nil {
+ appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType))
+ trackedFunctionError = true
+ break
+ }
+ if node.SynchronizedBeforeSuiteProc1Body == nil {
+ body, hasContext := extractSynchronizedBeforeSuiteProc1Body(arg)
+ if body == nil {
+ appendError(types.GinkgoErrors.InvalidBodyTypeForSynchronizedBeforeSuiteProc1(t, node.CodeLocation))
+ trackedFunctionError = true
+ }
+ node.SynchronizedBeforeSuiteProc1Body, node.SynchronizedBeforeSuiteProc1BodyHasContext = body, hasContext
+ } else if node.SynchronizedBeforeSuiteAllProcsBody == nil {
+ body, hasContext := extractSynchronizedBeforeSuiteAllProcsBody(arg)
+ if body == nil {
+ appendError(types.GinkgoErrors.InvalidBodyTypeForSynchronizedBeforeSuiteAllProcs(t, node.CodeLocation))
+ trackedFunctionError = true
+ }
+ node.SynchronizedBeforeSuiteAllProcsBody, node.SynchronizedBeforeSuiteAllProcsBodyHasContext = body, hasContext
+ }
+ } else if nodeType.Is(types.NodeTypeSynchronizedAfterSuite) {
+ if node.SynchronizedAfterSuiteAllProcsBody != nil && node.SynchronizedAfterSuiteProc1Body != nil {
+ appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType))
+ trackedFunctionError = true
+ break
+ }
+ body, hasContext := extractBodyFunction(deprecationTracker, node.CodeLocation, arg)
+ if body == nil {
+ appendError(types.GinkgoErrors.InvalidBodyType(t, node.CodeLocation, nodeType))
+ trackedFunctionError = true
+ break
+ }
+ if node.SynchronizedAfterSuiteAllProcsBody == nil {
+ node.SynchronizedAfterSuiteAllProcsBody, node.SynchronizedAfterSuiteAllProcsBodyHasContext = body, hasContext
+ } else if node.SynchronizedAfterSuiteProc1Body == nil {
+ node.SynchronizedAfterSuiteProc1Body, node.SynchronizedAfterSuiteProc1BodyHasContext = body, hasContext
+ }
+ } else {
+ if node.Body != nil {
+ appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType))
+ trackedFunctionError = true
+ break
+ }
+ node.Body, node.HasContext = extractBodyFunction(deprecationTracker, node.CodeLocation, arg)
+ if node.Body == nil {
+ appendError(types.GinkgoErrors.InvalidBodyType(t, node.CodeLocation, nodeType))
+ trackedFunctionError = true
+ break
+ }
+ }
+ default:
+ remainingArgs = append(remainingArgs, arg)
+ }
+ }
+
+ // validations
+ if node.MarkedPending && node.MarkedFocus {
+ appendError(types.GinkgoErrors.InvalidDeclarationOfFocusedAndPending(node.CodeLocation, nodeType))
+ }
+
+ if node.MarkedContinueOnFailure && !node.MarkedOrdered {
+ appendError(types.GinkgoErrors.InvalidContinueOnFailureDecoration(node.CodeLocation))
+ }
+
+ hasContext := node.HasContext || node.SynchronizedAfterSuiteProc1BodyHasContext || node.SynchronizedAfterSuiteAllProcsBodyHasContext || node.SynchronizedBeforeSuiteProc1BodyHasContext || node.SynchronizedBeforeSuiteAllProcsBodyHasContext
+
+ if !hasContext && (node.NodeTimeout > 0 || node.SpecTimeout > 0 || node.GracePeriod > 0) && len(errors) == 0 {
+ appendError(types.GinkgoErrors.InvalidTimeoutOrGracePeriodForNonContextNode(node.CodeLocation, nodeType))
+ }
+
+ if !node.NodeType.Is(types.NodeTypeReportBeforeEach|types.NodeTypeReportAfterEach|types.NodeTypeSynchronizedBeforeSuite|types.NodeTypeSynchronizedAfterSuite|types.NodeTypeReportBeforeSuite|types.NodeTypeReportAfterSuite) && node.Body == nil && !node.MarkedPending && !trackedFunctionError {
+ appendError(types.GinkgoErrors.MissingBodyFunction(node.CodeLocation, nodeType))
+ }
+
+ if node.NodeType.Is(types.NodeTypeSynchronizedBeforeSuite) && !trackedFunctionError && (node.SynchronizedBeforeSuiteProc1Body == nil || node.SynchronizedBeforeSuiteAllProcsBody == nil) {
+ appendError(types.GinkgoErrors.MissingBodyFunction(node.CodeLocation, nodeType))
+ }
+
+ if node.NodeType.Is(types.NodeTypeSynchronizedAfterSuite) && !trackedFunctionError && (node.SynchronizedAfterSuiteProc1Body == nil || node.SynchronizedAfterSuiteAllProcsBody == nil) {
+ appendError(types.GinkgoErrors.MissingBodyFunction(node.CodeLocation, nodeType))
+ }
+
+ for _, arg := range remainingArgs {
+ appendError(types.GinkgoErrors.UnknownDecorator(node.CodeLocation, nodeType, arg))
+ }
+
+ if node.FlakeAttempts > 0 && node.MustPassRepeatedly > 0 {
+ appendError(types.GinkgoErrors.InvalidDeclarationOfFlakeAttemptsAndMustPassRepeatedly(node.CodeLocation, nodeType))
+ }
+
+ if len(errors) > 0 {
+ return Node{}, errors
+ }
+
+ return node, errors
+}
+
+var doneType = reflect.TypeOf(make(Done))
+
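+// extractBodyFunction normalizes a user-supplied body into a func(SpecContext)
+// and reports whether the body accepts a context. Accepted signatures:
+// func(), the deprecated async func(Done), func(SpecContext), and
+// func(context.Context); anything else yields nil so the caller can emit an
+// InvalidBodyType error.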
+func extractBodyFunction(deprecationTracker *types.DeprecationTracker, cl types.CodeLocation, arg interface{}) (func(SpecContext), bool) {
+ t := reflect.TypeOf(arg)
+ if t.NumOut() > 0 || t.NumIn() > 1 {
+ return nil, false
+ }
+ if t.NumIn() == 1 {
+ if t.In(0) == doneType {
+ deprecationTracker.TrackDeprecation(types.Deprecations.Async(), cl)
+ deprecatedAsyncBody := arg.(func(Done))
+ return func(SpecContext) { deprecatedAsyncBody(make(Done)) }, false
+ } else if t.In(0).Implements(specContextType) {
+ return arg.(func(SpecContext)), true
+ } else if t.In(0).Implements(contextType) {
+ body := arg.(func(context.Context))
+ return func(c SpecContext) { body(c) }, true
+ }
+
+ return nil, false
+ }
+
+ body := arg.(func())
+ return func(SpecContext) { body() }, false
+}
+
+var byteType = reflect.TypeOf([]byte{})
+
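+// extractSynchronizedBeforeSuiteProc1Body adapts the process-1 function of a
+// SynchronizedBeforeSuite. The function may optionally accept a context and
+// may optionally return []byte (the data shared with the other processes);
+// any other shape yields nil. The returned bool reports whether a context is
+// accepted.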
+func extractSynchronizedBeforeSuiteProc1Body(arg interface{}) (func(SpecContext) []byte, bool) {
+ t := reflect.TypeOf(arg)
+ v := reflect.ValueOf(arg)
+
+ if t.NumOut() > 1 || t.NumIn() > 1 {
+ return nil, false
+ } else if t.NumOut() == 1 && t.Out(0) != byteType {
+ return nil, false
+ } else if t.NumIn() == 1 && !t.In(0).Implements(contextType) {
+ return nil, false
+ }
+ hasContext := t.NumIn() == 1
+
+ return func(c SpecContext) []byte {
+ var out []reflect.Value
+ if hasContext {
+ out = v.Call([]reflect.Value{reflect.ValueOf(c)})
+ } else {
+ out = v.Call([]reflect.Value{})
+ }
+ if len(out) == 1 {
+ return (out[0].Interface()).([]byte)
+ } else {
+ return []byte{}
+ }
+ }, hasContext
+}
+
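+// extractSynchronizedBeforeSuiteAllProcsBody adapts the all-procs function of
+// a SynchronizedBeforeSuite. The function must return nothing and may accept
+// an optional context and/or an optional []byte (the data returned by
+// process 1), in that order; any other shape yields nil.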
+func extractSynchronizedBeforeSuiteAllProcsBody(arg interface{}) (func(SpecContext, []byte), bool) {
+ t := reflect.TypeOf(arg)
+ v := reflect.ValueOf(arg)
+ hasContext, hasByte := false, false
+
+ if t.NumOut() > 0 || t.NumIn() > 2 {
+ return nil, false
+ } else if t.NumIn() == 2 && t.In(0).Implements(contextType) && t.In(1) == byteType {
+ hasContext, hasByte = true, true
+ } else if t.NumIn() == 1 && t.In(0).Implements(contextType) {
+ hasContext = true
+ } else if t.NumIn() == 1 && t.In(0) == byteType {
+ hasByte = true
+ } else if t.NumIn() != 0 {
+ return nil, false
+ }
+
+ return func(c SpecContext, b []byte) {
+ in := []reflect.Value{}
+ if hasContext {
+ in = append(in, reflect.ValueOf(c))
+ }
+ if hasByte {
+ in = append(in, reflect.ValueOf(b))
+ }
+ v.Call(in)
+ }, hasContext
+}
+
+var errInterface = reflect.TypeOf((*error)(nil)).Elem()
+
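+// NewCleanupNode builds the node behind DeferCleanup. Offset and CodeLocation
+// decorations adjust the reported code location; the first remaining argument
+// must be a function, and any further arguments are passed to it when the
+// cleanup runs. If the function's final return value is a non-nil error, the
+// supplied fail handler is invoked.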
+func NewCleanupNode(deprecationTracker *types.DeprecationTracker, fail func(string, types.CodeLocation), args ...interface{}) (Node, []error) {
+ decorations, remainingArgs := PartitionDecorations(args...)
+ baseOffset := 2
+ cl := types.NewCodeLocation(baseOffset)
+ finalArgs := []interface{}{}
+ for _, arg := range decorations {
+ switch t := reflect.TypeOf(arg); {
+ case t == reflect.TypeOf(Offset(0)):
+ cl = types.NewCodeLocation(baseOffset + int(arg.(Offset)))
+ case t == reflect.TypeOf(types.CodeLocation{}):
+ cl = arg.(types.CodeLocation)
+ default:
+ finalArgs = append(finalArgs, arg)
+ }
+ }
+ finalArgs = append(finalArgs, cl)
+
+ if len(remainingArgs) == 0 {
+ return Node{}, []error{types.GinkgoErrors.DeferCleanupInvalidFunction(cl)}
+ }
+
+ callback := reflect.ValueOf(remainingArgs[0])
+ if callback.Kind() != reflect.Func {
+ return Node{}, []error{types.GinkgoErrors.DeferCleanupInvalidFunction(cl)}
+ }
+
+ callArgs := []reflect.Value{}
+ for _, arg := range remainingArgs[1:] {
+ callArgs = append(callArgs, reflect.ValueOf(arg))
+ }
+
+ hasContext := false
+ t := callback.Type()
+ if t.NumIn() > 0 {
+ if t.In(0).Implements(specContextType) {
+ hasContext = true
+ } else if t.In(0).Implements(contextType) && (len(callArgs) == 0 || !callArgs[0].Type().Implements(contextType)) {
+ hasContext = true
+ }
+ }
+
+ handleFailure := func(out []reflect.Value) {
+ if len(out) == 0 {
+ return
+ }
+ last := out[len(out)-1]
+ if last.Type().Implements(errInterface) && !last.IsNil() {
+ fail(fmt.Sprintf("DeferCleanup callback returned error: %v", last), cl)
+ }
+ }
+
+ if hasContext {
+ finalArgs = append(finalArgs, func(c SpecContext) {
+ out := callback.Call(append([]reflect.Value{reflect.ValueOf(c)}, callArgs...))
+ handleFailure(out)
+ })
+ } else {
+ finalArgs = append(finalArgs, func() {
+ out := callback.Call(callArgs)
+ handleFailure(out)
+ })
+ }
+
+ return NewNode(deprecationTracker, types.NodeTypeCleanupInvalid, "", finalArgs...)
+}
+
+func (n Node) IsZero() bool {
+ return n.ID == 0
+}
+
+/* Nodes */
+type Nodes []Node
+
+func (n Nodes) Clone() Nodes {
+ nodes := make(Nodes, len(n))
+ copy(nodes, n)
+ return nodes
+}
+
+func (n Nodes) CopyAppend(nodes ...Node) Nodes {
+ numN := len(n)
+ out := make(Nodes, numN+len(nodes))
+ copy(out, n)
+ for j, node := range nodes {
+ out[numN+j] = node
+ }
+ return out
+}
+
+func (n Nodes) SplitAround(pivot Node) (Nodes, Nodes) {
+ pivotIdx := len(n)
+ for i := range n {
+ if n[i].ID == pivot.ID {
+ pivotIdx = i
+ break
+ }
+ }
+ left := n[:pivotIdx]
+ right := Nodes{}
+ if pivotIdx+1 < len(n) {
+ right = n[pivotIdx+1:]
+ }
+
+ return left, right
+}
+
+func (n Nodes) FirstNodeWithType(nodeTypes types.NodeType) Node {
+ for i := range n {
+ if n[i].NodeType.Is(nodeTypes) {
+ return n[i]
+ }
+ }
+ return Node{}
+}
+
+func (n Nodes) WithType(nodeTypes types.NodeType) Nodes {
+ count := 0
+ for i := range n {
+ if n[i].NodeType.Is(nodeTypes) {
+ count++
+ }
+ }
+
+ out, j := make(Nodes, count), 0
+ for i := range n {
+ if n[i].NodeType.Is(nodeTypes) {
+ out[j] = n[i]
+ j++
+ }
+ }
+ return out
+}
+
+func (n Nodes) WithoutType(nodeTypes types.NodeType) Nodes {
+ count := 0
+ for i := range n {
+ if !n[i].NodeType.Is(nodeTypes) {
+ count++
+ }
+ }
+
+ out, j := make(Nodes, count), 0
+ for i := range n {
+ if !n[i].NodeType.Is(nodeTypes) {
+ out[j] = n[i]
+ j++
+ }
+ }
+ return out
+}
+
+func (n Nodes) WithoutNode(nodeToExclude Node) Nodes {
+ idxToExclude := len(n)
+ for i := range n {
+ if n[i].ID == nodeToExclude.ID {
+ idxToExclude = i
+ break
+ }
+ }
+ if idxToExclude == len(n) {
+ return n
+ }
+ out, j := make(Nodes, len(n)-1), 0
+ for i := range n {
+ if i == idxToExclude {
+ continue
+ }
+ out[j] = n[i]
+ j++
+ }
+ return out
+}
+
+func (n Nodes) Filter(filter func(Node) bool) Nodes {
+ matches, count := make([]bool, len(n)), 0
+ for i := range n {
+ if filter(n[i]) {
+ matches[i] = true
+ count++
+ }
+ }
+ out, j := make(Nodes, count), 0
+ for i := range n {
+ if matches[i] {
+ out[j] = n[i]
+ j++
+ }
+ }
+ return out
+}
+
+func (n Nodes) FirstSatisfying(filter func(Node) bool) Node {
+ for i := range n {
+ if filter(n[i]) {
+ return n[i]
+ }
+ }
+ return Node{}
+}
+
+func (n Nodes) WithinNestingLevel(deepestNestingLevel int) Nodes {
+ count := 0
+ for i := range n {
+ if n[i].NestingLevel <= deepestNestingLevel {
+ count++
+ }
+ }
+ out, j := make(Nodes, count), 0
+ for i := range n {
+ if n[i].NestingLevel <= deepestNestingLevel {
+ out[j] = n[i]
+ j++
+ }
+ }
+ return out
+}
+
+func (n Nodes) SortedByDescendingNestingLevel() Nodes {
+ out := make(Nodes, len(n))
+ copy(out, n)
+ sort.SliceStable(out, func(i int, j int) bool {
+ return out[i].NestingLevel > out[j].NestingLevel
+ })
+
+ return out
+}
+
+func (n Nodes) SortedByAscendingNestingLevel() Nodes {
+ out := make(Nodes, len(n))
+ copy(out, n)
+ sort.SliceStable(out, func(i int, j int) bool {
+ return out[i].NestingLevel < out[j].NestingLevel
+ })
+
+ return out
+}
+
+func (n Nodes) FirstWithNestingLevel(level int) Node {
+ for i := range n {
+ if n[i].NestingLevel == level {
+ return n[i]
+ }
+ }
+ return Node{}
+}
+
+func (n Nodes) Reverse() Nodes {
+ out := make(Nodes, len(n))
+ for i := range n {
+ out[len(n)-1-i] = n[i]
+ }
+ return out
+}
+
+func (n Nodes) Texts() []string {
+ out := make([]string, len(n))
+ for i := range n {
+ out[i] = n[i].Text
+ }
+ return out
+}
+
+func (n Nodes) Labels() [][]string {
+ out := make([][]string, len(n))
+ for i := range n {
+ if n[i].Labels == nil {
+ out[i] = []string{}
+ } else {
+ out[i] = []string(n[i].Labels)
+ }
+ }
+ return out
+}
+
+func (n Nodes) UnionOfLabels() []string {
+ out := []string{}
+ seen := map[string]bool{}
+ for i := range n {
+ for _, label := range n[i].Labels {
+ if !seen[label] {
+ seen[label] = true
+ out = append(out, label)
+ }
+ }
+ }
+ return out
+}
+
+func (n Nodes) CodeLocations() []types.CodeLocation {
+ out := make([]types.CodeLocation, len(n))
+ for i := range n {
+ out[i] = n[i].CodeLocation
+ }
+ return out
+}
+
+func (n Nodes) BestTextFor(node Node) string {
+ if node.Text != "" {
+ return node.Text
+ }
+ parentNestingLevel := node.NestingLevel - 1
+ for i := range n {
+ if n[i].Text != "" && n[i].NestingLevel == parentNestingLevel {
+ return n[i].Text
+ }
+ }
+
+ return ""
+}
+
+func (n Nodes) ContainsNodeID(id uint) bool {
+ for i := range n {
+ if n[i].ID == id {
+ return true
+ }
+ }
+ return false
+}
+
+func (n Nodes) HasNodeMarkedPending() bool {
+ for i := range n {
+ if n[i].MarkedPending {
+ return true
+ }
+ }
+ return false
+}
+
+func (n Nodes) HasNodeMarkedFocus() bool {
+ for i := range n {
+ if n[i].MarkedFocus {
+ return true
+ }
+ }
+ return false
+}
+
+func (n Nodes) HasNodeMarkedSerial() bool {
+ for i := range n {
+ if n[i].MarkedSerial {
+ return true
+ }
+ }
+ return false
+}
+
+func (n Nodes) FirstNodeMarkedOrdered() Node {
+ for i := range n {
+ if n[i].MarkedOrdered {
+ return n[i]
+ }
+ }
+ return Node{}
+}
+
+func (n Nodes) IndexOfFirstNodeMarkedOrdered() int {
+ for i := range n {
+ if n[i].MarkedOrdered {
+ return i
+ }
+ }
+ return -1
+}
+
+func (n Nodes) GetMaxFlakeAttempts() int {
+ maxFlakeAttempts := 0
+ for i := range n {
+ if n[i].FlakeAttempts > 0 {
+ maxFlakeAttempts = n[i].FlakeAttempts
+ }
+ }
+ return maxFlakeAttempts
+}
+
+func (n Nodes) GetMaxMustPassRepeatedly() int {
+ maxMustPassRepeatedly := 0
+ for i := range n {
+ if n[i].MustPassRepeatedly > 0 {
+ maxMustPassRepeatedly = n[i].MustPassRepeatedly
+ }
+ }
+ return maxMustPassRepeatedly
+}
+
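+// unrollInterfaceSlice recursively flattens slice arguments (except Labels,
+// which stay intact) so that decorators passed inside slices behave as if
+// they had been passed inline.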
+func unrollInterfaceSlice(args interface{}) []interface{} {
+ v := reflect.ValueOf(args)
+ if v.Kind() != reflect.Slice {
+ return []interface{}{args}
+ }
+ out := []interface{}{}
+ for i := 0; i < v.Len(); i++ {
+ el := reflect.ValueOf(v.Index(i).Interface())
+ if el.Kind() == reflect.Slice && el.Type() != reflect.TypeOf(Labels{}) {
+ out = append(out, unrollInterfaceSlice(el.Interface())...)
+ } else {
+ out = append(out, v.Index(i).Interface())
+ }
+ }
+ return out
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go b/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go
new file mode 100644
index 000000000..84eea0a59
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go
@@ -0,0 +1,171 @@
+package internal
+
+import (
+ "math/rand"
+ "sort"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
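+// SortableSpecs sorts indices into Specs rather than the Specs themselves.
+// Specs are ordered by filename, then line number, with spec text and node ID
+// as tie-breakers; this gives every parallel process the same deterministic
+// base order before any shuffling is applied.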
+type SortableSpecs struct {
+ Specs Specs
+ Indexes []int
+}
+
+func NewSortableSpecs(specs Specs) *SortableSpecs {
+ indexes := make([]int, len(specs))
+ for i := range specs {
+ indexes[i] = i
+ }
+ return &SortableSpecs{
+ Specs: specs,
+ Indexes: indexes,
+ }
+}
+func (s *SortableSpecs) Len() int { return len(s.Indexes) }
+func (s *SortableSpecs) Swap(i, j int) { s.Indexes[i], s.Indexes[j] = s.Indexes[j], s.Indexes[i] }
+func (s *SortableSpecs) Less(i, j int) bool {
+ a, b := s.Specs[s.Indexes[i]], s.Specs[s.Indexes[j]]
+
+ aNodes, bNodes := a.Nodes.WithType(types.NodeTypesForContainerAndIt), b.Nodes.WithType(types.NodeTypesForContainerAndIt)
+
+ firstOrderedAIdx, firstOrderedBIdx := aNodes.IndexOfFirstNodeMarkedOrdered(), bNodes.IndexOfFirstNodeMarkedOrdered()
+ if firstOrderedAIdx > -1 && firstOrderedBIdx > -1 && aNodes[firstOrderedAIdx].ID == bNodes[firstOrderedBIdx].ID {
+ // strictly preserve order within an ordered container; IDs are generated monotonically, so they track declaration order
+ return aNodes.FirstNodeWithType(types.NodeTypeIt).ID < bNodes.FirstNodeWithType(types.NodeTypeIt).ID
+ }
+
+ // if either spec is in an ordered container - only use the nodes up to the outermost ordered container
+ if firstOrderedAIdx > -1 {
+ aNodes = aNodes[:firstOrderedAIdx+1]
+ }
+ if firstOrderedBIdx > -1 {
+ bNodes = bNodes[:firstOrderedBIdx+1]
+ }
+
+ for i := 0; i < len(aNodes) && i < len(bNodes); i++ {
+ aCL, bCL := aNodes[i].CodeLocation, bNodes[i].CodeLocation
+ if aCL.FileName != bCL.FileName {
+ return aCL.FileName < bCL.FileName
+ }
+ if aCL.LineNumber != bCL.LineNumber {
+ return aCL.LineNumber < bCL.LineNumber
+ }
+ }
+ // either everything is equal or the two specs have different numbers of code locations
+ if len(aNodes) != len(bNodes) {
+ return len(aNodes) < len(bNodes)
+ }
+ // ok, now we are sure everything was equal. so we use the spec text to break ties
+ for i := 0; i < len(aNodes); i++ {
+ if aNodes[i].Text != bNodes[i].Text {
+ return aNodes[i].Text < bNodes[i].Text
+ }
+ }
+ // ok, all those texts were equal. we'll use the ID of the most deeply nested node as a last resort
+ return aNodes[len(aNodes)-1].ID < bNodes[len(bNodes)-1].ID
+}
+
+type GroupedSpecIndices []SpecIndices
+type SpecIndices []int
+
+func OrderSpecs(specs Specs, suiteConfig types.SuiteConfig) (GroupedSpecIndices, GroupedSpecIndices) {
+ /*
+ Ginkgo has sophisticated support for randomizing specs. Specs are guaranteed to have the same
+ order for a given seed across test runs.
+
+ By default only top-level containers and specs are shuffled - this makes for a more intuitive debugging
+ experience - specs within a given container run in the order they appear in the file.
+
+ Developers can set -randomizeAllSpecs to shuffle _all_ specs.
+
+ In addition, spec containers can be marked as Ordered. Specs within an Ordered container are never shuffled.
+
+ Finally, specs and spec containers can be marked as Serial. When running in parallel, serial specs run on Process #1 _after_ all other processes have finished.
+ */
+
+ // Seed a new random source based on the configured random seed.
+ r := rand.New(rand.NewSource(suiteConfig.RandomSeed))
+
+ // first, we sort the entire suite to ensure a deterministic order. the sort is performed by filename, then line number, and then spec text. this ensures every parallel process has the exact same spec order and is only necessary to cover the edge case where the user iterates over a map to generate specs.
+ sortableSpecs := NewSortableSpecs(specs)
+ sort.Sort(sortableSpecs)
+
+ // then we break things into execution groups
+ // a group represents a single unit of execution and is a collection of SpecIndices
+ // usually a group is just a single spec, however ordered containers must be preserved as a single group
+ executionGroupIDs := []uint{}
+ executionGroups := map[uint]SpecIndices{}
+ for _, idx := range sortableSpecs.Indexes {
+ spec := specs[idx]
+ groupNode := spec.Nodes.FirstNodeMarkedOrdered()
+ if groupNode.IsZero() {
+ groupNode = spec.Nodes.FirstNodeWithType(types.NodeTypeIt)
+ }
+ executionGroups[groupNode.ID] = append(executionGroups[groupNode.ID], idx)
+ if len(executionGroups[groupNode.ID]) == 1 {
+ executionGroupIDs = append(executionGroupIDs, groupNode.ID)
+ }
+ }
+
+ // now, we only shuffle all the execution groups if we're randomizing all specs; otherwise
+ // we shuffle just the outermost containers. so we need to form shufflable groupings of GroupIDs
+ shufflableGroupingIDs := []uint{}
+ shufflableGroupingIDToGroupIDs := map[uint][]uint{}
+
+ // for each execution group we're going to have to pick a node to represent how the
+ // execution group is grouped for shuffling:
+ nodeTypesToShuffle := types.NodeTypesForContainerAndIt
+ if suiteConfig.RandomizeAllSpecs {
+ nodeTypesToShuffle = types.NodeTypeIt
+ }
+
+ //so, for each execution group:
+ for _, groupID := range executionGroupIDs {
+ // pick out a representative spec
+ representativeSpec := specs[executionGroups[groupID][0]]
+
+ // and grab the node on the spec that will represent which shufflable group this execution group belongs to
+ shufflableGroupingNode := representativeSpec.Nodes.FirstNodeWithType(nodeTypesToShuffle)
+
+ //add the execution group to its shufflable group
+ shufflableGroupingIDToGroupIDs[shufflableGroupingNode.ID] = append(shufflableGroupingIDToGroupIDs[shufflableGroupingNode.ID], groupID)
+
+ //and if it's the first one in
+ if len(shufflableGroupingIDToGroupIDs[shufflableGroupingNode.ID]) == 1 {
+ // record the shufflable group ID
+ shufflableGroupingIDs = append(shufflableGroupingIDs, shufflableGroupingNode.ID)
+ }
+ }
+
+ // now we permute the sorted shufflable grouping IDs and build the ordered Groups
+ orderedGroups := GroupedSpecIndices{}
+ permutation := r.Perm(len(shufflableGroupingIDs))
+ for _, j := range permutation {
+ //let's get the execution group IDs for this shufflable group:
+ executionGroupIDsForJ := shufflableGroupingIDToGroupIDs[shufflableGroupingIDs[j]]
+ // and we'll add their associated specindices to the orderedGroups slice:
+ for _, executionGroupID := range executionGroupIDsForJ {
+ orderedGroups = append(orderedGroups, executionGroups[executionGroupID])
+ }
+ }
+
+ // If we're running in series, we're done.
+ if suiteConfig.ParallelTotal == 1 {
+ return orderedGroups, GroupedSpecIndices{}
+ }
+
+ // We're running in parallel so we need to partition the ordered groups into a parallelizable set and a serialized set.
+ // The parallelizable groups will run across all Ginkgo processes...
+ // ...the serial groups will only run on Process #1 after all other processes have exited.
+ parallelizableGroups, serialGroups := GroupedSpecIndices{}, GroupedSpecIndices{}
+ for _, specIndices := range orderedGroups {
+ if specs[specIndices[0]].Nodes.HasNodeMarkedSerial() {
+ serialGroups = append(serialGroups, specIndices)
+ } else {
+ parallelizableGroups = append(parallelizableGroups, specIndices)
+ }
+ }
+
+ return parallelizableGroups, serialGroups
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor.go b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor.go
new file mode 100644
index 000000000..4a1c09461
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor.go
@@ -0,0 +1,250 @@
+package internal
+
+import (
+ "bytes"
+ "io"
+ "os"
+ "time"
+)
+
+const BAILOUT_TIME = 1 * time.Second
+const BAILOUT_MESSAGE = `Ginkgo detected an issue while intercepting output.
+
+When running in parallel, Ginkgo captures stdout and stderr output
+and attaches it to the running spec. It looks like that process is getting
+stuck for this suite.
+
+This usually happens if you, or a library you are using, spin up an external
+process and set cmd.Stdout = os.Stdout and/or cmd.Stderr = os.Stderr. This
+causes the external process to keep Ginkgo's output interceptor pipe open and
+causes output interception to hang.
+
+Ginkgo has detected this and short-circuited the capture process. The specs
+will continue running after this message; however, output from the external
+process that caused this issue will not be captured.
+
+You have several options to fix this. In preferred order they are:
+
+1. Pass GinkgoWriter instead of os.Stdout or os.Stderr to your process.
+2. Ensure your process exits before the current spec completes. If your
+process is long-lived and must cross spec boundaries, this option won't
+work for you.
+3. Pause Ginkgo's output interceptor before starting your process and then
+resume it after. Use PauseOutputInterception() and ResumeOutputInterception()
+to do this.
+4. Set --output-interceptor-mode=none when running your Ginkgo suite. This will
+turn off all output interception but allow specs to run in parallel without this
+issue. You may miss important output if you do this, including output from Go's
+race detector.
+
+More details on issue #851 - https://github.com/onsi/ginkgo/issues/851
+`
+
+/*
+The OutputInterceptor is used to
+intercept and capture all stdout and stderr output during a test run.
+*/
+type OutputInterceptor interface {
+ StartInterceptingOutput()
+ StartInterceptingOutputAndForwardTo(io.Writer)
+ StopInterceptingAndReturnOutput() string
+
+ PauseIntercepting()
+ ResumeIntercepting()
+
+ Shutdown()
+}
+
+type NoopOutputInterceptor struct{}
+
+func (interceptor NoopOutputInterceptor) StartInterceptingOutput() {}
+func (interceptor NoopOutputInterceptor) StartInterceptingOutputAndForwardTo(io.Writer) {}
+func (interceptor NoopOutputInterceptor) StopInterceptingAndReturnOutput() string { return "" }
+func (interceptor NoopOutputInterceptor) PauseIntercepting() {}
+func (interceptor NoopOutputInterceptor) ResumeIntercepting() {}
+func (interceptor NoopOutputInterceptor) Shutdown() {}
+
+type pipePair struct {
+ reader *os.File
+ writer *os.File
+}
+
+func startPipeFactory(pipeChannel chan pipePair, shutdown chan interface{}) {
+ for {
+ //make the next pipe...
+ pair := pipePair{}
+ pair.reader, pair.writer, _ = os.Pipe()
+ select {
+ //...and provide it to the next consumer (they are responsible for closing the files)
+ case pipeChannel <- pair:
+ continue
+ //...or close the files if we were told to shutdown
+ case <-shutdown:
+ pair.reader.Close()
+ pair.writer.Close()
+ return
+ }
+ }
+}
+
+type interceptorImplementation interface {
+ CreateStdoutStderrClones() (*os.File, *os.File)
+ ConnectPipeToStdoutStderr(*os.File)
+ RestoreStdoutStderrFromClones(*os.File, *os.File)
+ ShutdownClones(*os.File, *os.File)
+}
+
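+// genericOutputInterceptor implements OutputInterceptor on top of a
+// platform-specific interceptorImplementation. It owns the pipe lifecycle and
+// the background goroutine that drains intercepted output into a buffer (and,
+// optionally, forwards it to a writer).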
+type genericOutputInterceptor struct {
+ intercepting bool
+
+ stdoutClone *os.File
+ stderrClone *os.File
+ pipe pipePair
+
+ shutdown chan interface{}
+ emergencyBailout chan interface{}
+ pipeChannel chan pipePair
+ interceptedContent chan string
+
+ forwardTo io.Writer
+ accumulatedOutput string
+
+ implementation interceptorImplementation
+}
+
+func (interceptor *genericOutputInterceptor) StartInterceptingOutput() {
+ interceptor.StartInterceptingOutputAndForwardTo(io.Discard)
+}
+
+func (interceptor *genericOutputInterceptor) StartInterceptingOutputAndForwardTo(w io.Writer) {
+ if interceptor.intercepting {
+ return
+ }
+ interceptor.accumulatedOutput = ""
+ interceptor.forwardTo = w
+ interceptor.ResumeIntercepting()
+}
+
+func (interceptor *genericOutputInterceptor) StopInterceptingAndReturnOutput() string {
+ if interceptor.intercepting {
+ interceptor.PauseIntercepting()
+ }
+ return interceptor.accumulatedOutput
+}
+
+func (interceptor *genericOutputInterceptor) ResumeIntercepting() {
+ if interceptor.intercepting {
+ return
+ }
+ interceptor.intercepting = true
+ if interceptor.stdoutClone == nil {
+ interceptor.stdoutClone, interceptor.stderrClone = interceptor.implementation.CreateStdoutStderrClones()
+ interceptor.shutdown = make(chan interface{})
+ go startPipeFactory(interceptor.pipeChannel, interceptor.shutdown)
+ }
+
+ // Now we grab a pipe; we'll redirect file descriptors 1 and 2 to its write end (writing to those descriptors is how everything else in the world logs to stdout and stderr).
+ // We get the pipe from our pipe factory. It runs in the background so we can request the next pipe while the spec being intercepted is running.
+ interceptor.pipe = <-interceptor.pipeChannel
+
+ interceptor.emergencyBailout = make(chan interface{})
+
+ //Spin up a goroutine to copy data from the pipe into a buffer, this is how we capture any output the user is emitting
+ go func() {
+ buffer := &bytes.Buffer{}
+ destination := io.MultiWriter(buffer, interceptor.forwardTo)
+ copyFinished := make(chan interface{})
+ reader := interceptor.pipe.reader
+ go func() {
+ io.Copy(destination, reader)
+ reader.Close() // close the read end of the pipe so we don't leak a file descriptor
+ close(copyFinished)
+ }()
+ select {
+ case <-copyFinished:
+ interceptor.interceptedContent <- buffer.String()
+ case <-interceptor.emergencyBailout:
+ interceptor.interceptedContent <- ""
+ }
+ }()
+
+ interceptor.implementation.ConnectPipeToStdoutStderr(interceptor.pipe.writer)
+}
+
+func (interceptor *genericOutputInterceptor) PauseIntercepting() {
+ if !interceptor.intercepting {
+ return
+ }
+ // first we have to close the write end of the pipe. To do this we have to close all file descriptors pointing
+ // to the write end. So that would be the pipewriter itself, and FD #1 and FD #2 if we've Dup2'd them
+ interceptor.pipe.writer.Close() // the pipewriter itself
+
+ // we also need to stop intercepting. we do that by pointing file descriptors #1 and #2 back at the original stdout and stderr file descriptions saved in the clones;
+ // the dup over #1 and #2 also closes their previous references to the pipe's write end
+ interceptor.implementation.RestoreStdoutStderrFromClones(interceptor.stdoutClone, interceptor.stderrClone)
+
+ var content string
+ select {
+ case content = <-interceptor.interceptedContent:
+ case <-time.After(BAILOUT_TIME):
+ /*
+ By closing all the file descriptors associated with the pipe writer's file description, the io.Copy reading from the reader
+ should eventually receive an EOF and exit.
+
+ **However**, if the user has spun up an external process and passed in os.Stdout/os.Stderr to cmd.Stdout/cmd.Stderr then the external process
+ will have a file descriptor pointing to the pipe writer's file description and it will not close until the external process exits.
+
+ That would leave us hanging here waiting for the io.Copy to close forever. Instead we invoke this emergency escape valve. This returns whatever
+ content we've got but leaves the io.Copy running. This ensures the external process can continue writing without hanging at the cost of leaking a goroutine
+ and file descriptor (though these will be cleaned up when the process exits).
+
+ We tack on a message to notify the user that they've hit this edge case and encourage them to address it.
+ */
+ close(interceptor.emergencyBailout)
+ content = <-interceptor.interceptedContent + BAILOUT_MESSAGE
+ }
+
+ interceptor.accumulatedOutput += content
+ interceptor.intercepting = false
+}
+
+func (interceptor *genericOutputInterceptor) Shutdown() {
+ interceptor.PauseIntercepting()
+
+ if interceptor.stdoutClone != nil {
+ close(interceptor.shutdown)
+ interceptor.implementation.ShutdownClones(interceptor.stdoutClone, interceptor.stderrClone)
+ interceptor.stdoutClone = nil
+ interceptor.stderrClone = nil
+ }
+}
+
+/* This is used on windows builds but included here so it can be explicitly tested on unix systems too */
+func NewOSGlobalReassigningOutputInterceptor() OutputInterceptor {
+ return &genericOutputInterceptor{
+ interceptedContent: make(chan string),
+ pipeChannel: make(chan pipePair),
+ shutdown: make(chan interface{}),
+ implementation: &osGlobalReassigningOutputInterceptorImpl{},
+ }
+}
+
+type osGlobalReassigningOutputInterceptorImpl struct{}
+
+func (impl *osGlobalReassigningOutputInterceptorImpl) CreateStdoutStderrClones() (*os.File, *os.File) {
+ return os.Stdout, os.Stderr
+}
+
+func (impl *osGlobalReassigningOutputInterceptorImpl) ConnectPipeToStdoutStderr(pipeWriter *os.File) {
+ os.Stdout = pipeWriter
+ os.Stderr = pipeWriter
+}
+
+func (impl *osGlobalReassigningOutputInterceptorImpl) RestoreStdoutStderrFromClones(stdoutClone *os.File, stderrClone *os.File) {
+ os.Stdout = stdoutClone
+ os.Stderr = stderrClone
+}
+
+func (impl *osGlobalReassigningOutputInterceptorImpl) ShutdownClones(_ *os.File, _ *os.File) {
+ //noop
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go
new file mode 100644
index 000000000..8a237f446
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go
@@ -0,0 +1,73 @@
+//go:build freebsd || openbsd || netbsd || dragonfly || darwin || linux || solaris
+// +build freebsd openbsd netbsd dragonfly darwin linux solaris
+
+package internal
+
+import (
+ "os"
+
+ "golang.org/x/sys/unix"
+)
+
+func NewOutputInterceptor() OutputInterceptor {
+ return &genericOutputInterceptor{
+ interceptedContent: make(chan string),
+ pipeChannel: make(chan pipePair),
+ shutdown: make(chan interface{}),
+ implementation: &dupSyscallOutputInterceptorImpl{},
+ }
+}
+
+type dupSyscallOutputInterceptorImpl struct{}
+
+func (impl *dupSyscallOutputInterceptorImpl) CreateStdoutStderrClones() (*os.File, *os.File) {
+ // To clone stdout and stderr we:
+ // First, create two clone file descriptors that point to the stdout and stderr file descriptions
+ stdoutCloneFD, _ := unix.Dup(1)
+ stderrCloneFD, _ := unix.Dup(2)
+
+ // Important: set the FDs to FD_CLOEXEC to prevent them from leaking into child processes
+ // https://github.com/onsi/ginkgo/issues/1191
+ flags, err := unix.FcntlInt(uintptr(stdoutCloneFD), unix.F_GETFD, 0)
+ if err == nil {
+ unix.FcntlInt(uintptr(stdoutCloneFD), unix.F_SETFD, flags|unix.FD_CLOEXEC)
+ }
+ flags, err = unix.FcntlInt(uintptr(stderrCloneFD), unix.F_GETFD, 0)
+ if err == nil {
+ unix.FcntlInt(uintptr(stderrCloneFD), unix.F_SETFD, flags|unix.FD_CLOEXEC)
+ }
+
+ // And then wrap the clone file descriptors in files.
+ // One benefit of this (that we don't use yet) is that we can actually write
+ // to these files to emit output to the console even though we're intercepting output
+ stdoutClone := os.NewFile(uintptr(stdoutCloneFD), "stdout-clone")
+ stderrClone := os.NewFile(uintptr(stderrCloneFD), "stderr-clone")
+
+ //these clones remain alive throughout the lifecycle of the suite and don't need to be recreated
+ //this speeds things up a bit, actually.
+ return stdoutClone, stderrClone
+}
+
+func (impl *dupSyscallOutputInterceptorImpl) ConnectPipeToStdoutStderr(pipeWriter *os.File) {
+ // To redirect output to our pipe we need to point the 1 and 2 file descriptors (which is how the world tries to log things)
+ // to the write end of the pipe.
+ // We do this with Dup2 (possibly Dup3 on some architectures) to have file descriptors 1 and 2 point to the same file description as the pipeWriter
+ // This effectively shunts data written to stdout and stderr to the write end of our pipe
+ unix.Dup2(int(pipeWriter.Fd()), 1)
+ unix.Dup2(int(pipeWriter.Fd()), 2)
+}
+
+func (impl *dupSyscallOutputInterceptorImpl) RestoreStdoutStderrFromClones(stdoutClone *os.File, stderrClone *os.File) {
+ // To restore stdout/stderr from the clones we have the 1 and 2 file descriptors
+ // point to the original file descriptions that we saved off in the clones.
+ // This has the added benefit of closing the connection between these descriptors and the write end of the pipe
+ // which is important to cause the io.Copy on the pipe.Reader to end.
+ unix.Dup2(int(stdoutClone.Fd()), 1)
+ unix.Dup2(int(stderrClone.Fd()), 2)
+}
+
+func (impl *dupSyscallOutputInterceptorImpl) ShutdownClones(stdoutClone *os.File, stderrClone *os.File) {
+ // We're done with the clones so we can close them to clean up after ourselves
+ stdoutClone.Close()
+ stderrClone.Close()
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_wasm.go b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_wasm.go
new file mode 100644
index 000000000..4c374935b
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_wasm.go
@@ -0,0 +1,7 @@
+//go:build wasm
+
+package internal
+
+func NewOutputInterceptor() OutputInterceptor {
+ return &NoopOutputInterceptor{}
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_win.go b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_win.go
new file mode 100644
index 000000000..30c2851a8
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_win.go
@@ -0,0 +1,7 @@
+// +build windows
+
+package internal
+
+func NewOutputInterceptor() OutputInterceptor {
+ return NewOSGlobalReassigningOutputInterceptor()
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go
new file mode 100644
index 000000000..b3cd64292
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go
@@ -0,0 +1,72 @@
+package parallel_support
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "time"
+
+ "github.com/onsi/ginkgo/v2/reporters"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type BeforeSuiteState struct {
+ Data []byte
+ State types.SpecState
+}
+
+type ParallelIndexCounter struct {
+ Index int
+}
+
+var ErrorGone = fmt.Errorf("gone")
+var ErrorFailed = fmt.Errorf("failed")
+var ErrorEarly = fmt.Errorf("early")
+
+var POLLING_INTERVAL = 50 * time.Millisecond
+
+type Server interface {
+ Start()
+ Close()
+ Address() string
+ RegisterAlive(node int, alive func() bool)
+ GetSuiteDone() chan interface{}
+ GetOutputDestination() io.Writer
+ SetOutputDestination(io.Writer)
+}
+
+type Client interface {
+ Connect() bool
+ Close() error
+
+ PostSuiteWillBegin(report types.Report) error
+ PostDidRun(report types.SpecReport) error
+ PostSuiteDidEnd(report types.Report) error
+ PostReportBeforeSuiteCompleted(state types.SpecState) error
+ BlockUntilReportBeforeSuiteCompleted() (types.SpecState, error)
+ PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error
+ BlockUntilSynchronizedBeforeSuiteData() (types.SpecState, []byte, error)
+ BlockUntilNonprimaryProcsHaveFinished() error
+ BlockUntilAggregatedNonprimaryProcsReport() (types.Report, error)
+ FetchNextCounter() (int, error)
+ PostAbort() error
+ ShouldAbort() bool
+ PostEmitProgressReport(report types.ProgressReport) error
+ Write(p []byte) (int, error)
+}
+
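+// NewServer (and NewClient below) pick the parallel-support transport from the
+// GINKGO_PARALLEL_PROTOCOL environment variable: "HTTP" selects the
+// JSON-over-HTTP implementation; anything else falls back to the default
+// net/rpc implementation.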
+func NewServer(parallelTotal int, reporter reporters.Reporter) (Server, error) {
+ if os.Getenv("GINKGO_PARALLEL_PROTOCOL") == "HTTP" {
+ return newHttpServer(parallelTotal, reporter)
+ } else {
+ return newRPCServer(parallelTotal, reporter)
+ }
+}
+
+func NewClient(serverHost string) Client {
+ if os.Getenv("GINKGO_PARALLEL_PROTOCOL") == "HTTP" {
+ return newHttpClient(serverHost)
+ } else {
+ return newRPCClient(serverHost)
+ }
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go
new file mode 100644
index 000000000..6547c7a66
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go
@@ -0,0 +1,169 @@
+package parallel_support
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "time"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type httpClient struct {
+ serverHost string
+}
+
+func newHttpClient(serverHost string) *httpClient {
+ return &httpClient{
+ serverHost: serverHost,
+ }
+}
+
+func (client *httpClient) Connect() bool {
+ resp, err := http.Get(client.serverHost + "/up")
+ if err != nil {
+ return false
+ }
+ resp.Body.Close()
+ return resp.StatusCode == http.StatusOK
+}
+
+func (client *httpClient) Close() error {
+ return nil
+}
+
+func (client *httpClient) post(path string, data interface{}) error {
+ var body io.Reader
+ if data != nil {
+ encoded, err := json.Marshal(data)
+ if err != nil {
+ return err
+ }
+ body = bytes.NewBuffer(encoded)
+ }
+ resp, err := http.Post(client.serverHost+path, "application/json", body)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ return fmt.Errorf("received unexpected status code %d", resp.StatusCode)
+ }
+ return nil
+}
+
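+// poll GETs path until the server stops replying 425 Too Early, sleeping
+// POLLING_INTERVAL between attempts. 410 Gone maps to ErrorGone and 424
+// Failed Dependency to ErrorFailed; on success the response body is decoded
+// into data when data is non-nil.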
+func (client *httpClient) poll(path string, data interface{}) error {
+ for {
+ resp, err := http.Get(client.serverHost + path)
+ if err != nil {
+ return err
+ }
+ if resp.StatusCode == http.StatusTooEarly {
+ resp.Body.Close()
+ time.Sleep(POLLING_INTERVAL)
+ continue
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode == http.StatusGone {
+ return ErrorGone
+ }
+ if resp.StatusCode == http.StatusFailedDependency {
+ return ErrorFailed
+ }
+ if resp.StatusCode != http.StatusOK {
+ return fmt.Errorf("received unexpected status code %d", resp.StatusCode)
+ }
+ if data != nil {
+ return json.NewDecoder(resp.Body).Decode(data)
+ }
+ return nil
+ }
+}
+
+func (client *httpClient) PostSuiteWillBegin(report types.Report) error {
+ return client.post("/suite-will-begin", report)
+}
+
+func (client *httpClient) PostDidRun(report types.SpecReport) error {
+ return client.post("/did-run", report)
+}
+
+func (client *httpClient) PostSuiteDidEnd(report types.Report) error {
+ return client.post("/suite-did-end", report)
+}
+
+func (client *httpClient) PostEmitProgressReport(report types.ProgressReport) error {
+ return client.post("/progress-report", report)
+}
+
+func (client *httpClient) PostReportBeforeSuiteCompleted(state types.SpecState) error {
+ return client.post("/report-before-suite-completed", state)
+}
+
+func (client *httpClient) BlockUntilReportBeforeSuiteCompleted() (types.SpecState, error) {
+ var state types.SpecState
+ err := client.poll("/report-before-suite-state", &state)
+ if err == ErrorGone {
+ return types.SpecStateFailed, nil
+ }
+ return state, err
+}
+
+func (client *httpClient) PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error {
+ beforeSuiteState := BeforeSuiteState{
+ State: state,
+ Data: data,
+ }
+ return client.post("/before-suite-completed", beforeSuiteState)
+}
+
+func (client *httpClient) BlockUntilSynchronizedBeforeSuiteData() (types.SpecState, []byte, error) {
+ var beforeSuiteState BeforeSuiteState
+ err := client.poll("/before-suite-state", &beforeSuiteState)
+ if err == ErrorGone {
+ return types.SpecStateInvalid, nil, types.GinkgoErrors.SynchronizedBeforeSuiteDisappearedOnProc1()
+ }
+ return beforeSuiteState.State, beforeSuiteState.Data, err
+}
+
+func (client *httpClient) BlockUntilNonprimaryProcsHaveFinished() error {
+ return client.poll("/have-nonprimary-procs-finished", nil)
+}
+
+func (client *httpClient) BlockUntilAggregatedNonprimaryProcsReport() (types.Report, error) {
+ var report types.Report
+ err := client.poll("/aggregated-nonprimary-procs-report", &report)
+ if err == ErrorGone {
+ return types.Report{}, types.GinkgoErrors.AggregatedReportUnavailableDueToNodeDisappearing()
+ }
+ return report, err
+}
+
+func (client *httpClient) FetchNextCounter() (int, error) {
+ var counter ParallelIndexCounter
+ err := client.poll("/counter", &counter)
+ return counter.Index, err
+}
+
+func (client *httpClient) PostAbort() error {
+ return client.post("/abort", nil)
+}
+
+func (client *httpClient) ShouldAbort() bool {
+ err := client.poll("/abort", nil)
+ if err == ErrorGone {
+ return true
+ }
+ return false
+}
+
+func (client *httpClient) Write(p []byte) (int, error) {
+ resp, err := http.Post(client.serverHost+"/emit-output", "text/plain;charset=UTF-8", bytes.NewReader(p))
+ if err != nil {
+ return 0, err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ return 0, fmt.Errorf("failed to emit output")
+ }
+ return len(p), nil
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go
new file mode 100644
index 000000000..d2c71ab1b
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go
@@ -0,0 +1,242 @@
+/*
+
+The parallel_support package provides the pieces that allow Ginkgo test suites to report to remote listeners.
+This is used, primarily, to enable streaming parallel test output but has, in principle, broader applications (e.g. streaming test output to a browser).
+
+*/
+
+package parallel_support
+
+import (
+ "encoding/json"
+ "io"
+ "net"
+ "net/http"
+
+ "github.com/onsi/ginkgo/v2/reporters"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+/*
+httpServer spins up on an automatically selected port and listens for communication from the forwarding reporter.
+It then forwards that communication to attached reporters.
+*/
+type httpServer struct {
+ listener net.Listener
+ handler *ServerHandler
+}
+
+// Create a new server, automatically selecting a port
+func newHttpServer(parallelTotal int, reporter reporters.Reporter) (*httpServer, error) {
+ listener, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ return nil, err
+ }
+ return &httpServer{
+ listener: listener,
+ handler: newServerHandler(parallelTotal, reporter),
+ }, nil
+}
+
+// Start the server. You don't need to `go s.Start()`, just `s.Start()`
+func (server *httpServer) Start() {
+ httpServer := &http.Server{}
+ mux := http.NewServeMux()
+ httpServer.Handler = mux
+
+ //streaming endpoints
+ mux.HandleFunc("/suite-will-begin", server.specSuiteWillBegin)
+ mux.HandleFunc("/did-run", server.didRun)
+ mux.HandleFunc("/suite-did-end", server.specSuiteDidEnd)
+ mux.HandleFunc("/emit-output", server.emitOutput)
+ mux.HandleFunc("/progress-report", server.emitProgressReport)
+
+ //synchronization endpoints
+ mux.HandleFunc("/report-before-suite-completed", server.handleReportBeforeSuiteCompleted)
+ mux.HandleFunc("/report-before-suite-state", server.handleReportBeforeSuiteState)
+ mux.HandleFunc("/before-suite-completed", server.handleBeforeSuiteCompleted)
+ mux.HandleFunc("/before-suite-state", server.handleBeforeSuiteState)
+ mux.HandleFunc("/have-nonprimary-procs-finished", server.handleHaveNonprimaryProcsFinished)
+ mux.HandleFunc("/aggregated-nonprimary-procs-report", server.handleAggregatedNonprimaryProcsReport)
+ mux.HandleFunc("/counter", server.handleCounter)
+ mux.HandleFunc("/up", server.handleUp)
+ mux.HandleFunc("/abort", server.handleAbort)
+
+ go httpServer.Serve(server.listener)
+}
+
+// Stop the server
+func (server *httpServer) Close() {
+ server.listener.Close()
+}
+
+// The address at which the server can be reached. Pass this into the `ForwardingReporter`.
+func (server *httpServer) Address() string {
+ return "http://" + server.listener.Addr().String()
+}
+
+func (server *httpServer) GetSuiteDone() chan interface{} {
+ return server.handler.done
+}
+
+func (server *httpServer) GetOutputDestination() io.Writer {
+ return server.handler.outputDestination
+}
+
+func (server *httpServer) SetOutputDestination(w io.Writer) {
+ server.handler.outputDestination = w
+}
+
+func (server *httpServer) RegisterAlive(node int, alive func() bool) {
+ server.handler.registerAlive(node, alive)
+}
+
+//
+// Streaming Endpoints
+//
+
+// The server forwards all received messages to the attached Ginkgo reporter.
+func (server *httpServer) decode(writer http.ResponseWriter, request *http.Request, object interface{}) bool {
+ defer request.Body.Close()
+ if json.NewDecoder(request.Body).Decode(object) != nil {
+ writer.WriteHeader(http.StatusBadRequest)
+ return false
+ }
+ return true
+}
+
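+// handleError maps the polling sentinel errors onto HTTP status codes
+// (ErrorEarly: 425, ErrorGone: 410, ErrorFailed: 424; anything else: 500)
+// and reports whether a response has already been written.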
+func (server *httpServer) handleError(err error, writer http.ResponseWriter) bool {
+ if err == nil {
+ return false
+ }
+ switch err {
+ case ErrorEarly:
+ writer.WriteHeader(http.StatusTooEarly)
+ case ErrorGone:
+ writer.WriteHeader(http.StatusGone)
+ case ErrorFailed:
+ writer.WriteHeader(http.StatusFailedDependency)
+ default:
+ writer.WriteHeader(http.StatusInternalServerError)
+ }
+ return true
+}
+
+func (server *httpServer) specSuiteWillBegin(writer http.ResponseWriter, request *http.Request) {
+ var report types.Report
+ if !server.decode(writer, request, &report) {
+ return
+ }
+
+ server.handleError(server.handler.SpecSuiteWillBegin(report, voidReceiver), writer)
+}
+
+func (server *httpServer) didRun(writer http.ResponseWriter, request *http.Request) {
+ var report types.SpecReport
+ if !server.decode(writer, request, &report) {
+ return
+ }
+
+ server.handleError(server.handler.DidRun(report, voidReceiver), writer)
+}
+
+func (server *httpServer) specSuiteDidEnd(writer http.ResponseWriter, request *http.Request) {
+ var report types.Report
+ if !server.decode(writer, request, &report) {
+ return
+ }
+ server.handleError(server.handler.SpecSuiteDidEnd(report, voidReceiver), writer)
+}
+
+func (server *httpServer) emitOutput(writer http.ResponseWriter, request *http.Request) {
+ output, err := io.ReadAll(request.Body)
+ if err != nil {
+ writer.WriteHeader(http.StatusInternalServerError)
+ return
+ }
+ var n int
+ server.handleError(server.handler.EmitOutput(output, &n), writer)
+}
+
+func (server *httpServer) emitProgressReport(writer http.ResponseWriter, request *http.Request) {
+ var report types.ProgressReport
+ if !server.decode(writer, request, &report) {
+ return
+ }
+ server.handleError(server.handler.EmitProgressReport(report, voidReceiver), writer)
+}
+
+func (server *httpServer) handleReportBeforeSuiteCompleted(writer http.ResponseWriter, request *http.Request) {
+ var state types.SpecState
+ if !server.decode(writer, request, &state) {
+ return
+ }
+
+ server.handleError(server.handler.ReportBeforeSuiteCompleted(state, voidReceiver), writer)
+}
+
+func (server *httpServer) handleReportBeforeSuiteState(writer http.ResponseWriter, request *http.Request) {
+ var state types.SpecState
+ if server.handleError(server.handler.ReportBeforeSuiteState(voidSender, &state), writer) {
+ return
+ }
+ json.NewEncoder(writer).Encode(state)
+}
+
+func (server *httpServer) handleBeforeSuiteCompleted(writer http.ResponseWriter, request *http.Request) {
+ var beforeSuiteState BeforeSuiteState
+ if !server.decode(writer, request, &beforeSuiteState) {
+ return
+ }
+
+ server.handleError(server.handler.BeforeSuiteCompleted(beforeSuiteState, voidReceiver), writer)
+}
+
+func (server *httpServer) handleBeforeSuiteState(writer http.ResponseWriter, request *http.Request) {
+ var beforeSuiteState BeforeSuiteState
+ if server.handleError(server.handler.BeforeSuiteState(voidSender, &beforeSuiteState), writer) {
+ return
+ }
+ json.NewEncoder(writer).Encode(beforeSuiteState)
+}
+
+func (server *httpServer) handleHaveNonprimaryProcsFinished(writer http.ResponseWriter, request *http.Request) {
+ if server.handleError(server.handler.HaveNonprimaryProcsFinished(voidSender, voidReceiver), writer) {
+ return
+ }
+ writer.WriteHeader(http.StatusOK)
+}
+
+func (server *httpServer) handleAggregatedNonprimaryProcsReport(writer http.ResponseWriter, request *http.Request) {
+ var aggregatedReport types.Report
+ if server.handleError(server.handler.AggregatedNonprimaryProcsReport(voidSender, &aggregatedReport), writer) {
+ return
+ }
+ json.NewEncoder(writer).Encode(aggregatedReport)
+}
+
+func (server *httpServer) handleCounter(writer http.ResponseWriter, request *http.Request) {
+ var n int
+ if server.handleError(server.handler.Counter(voidSender, &n), writer) {
+ return
+ }
+ json.NewEncoder(writer).Encode(ParallelIndexCounter{Index: n})
+}
+
+func (server *httpServer) handleUp(writer http.ResponseWriter, request *http.Request) {
+ writer.WriteHeader(http.StatusOK)
+}
+
+func (server *httpServer) handleAbort(writer http.ResponseWriter, request *http.Request) {
+ if request.Method == "GET" {
+ var shouldAbort bool
+ server.handler.ShouldAbort(voidSender, &shouldAbort)
+ if shouldAbort {
+ writer.WriteHeader(http.StatusGone)
+ } else {
+ writer.WriteHeader(http.StatusOK)
+ }
+ } else {
+ server.handler.Abort(voidSender, voidReceiver)
+ }
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go
new file mode 100644
index 000000000..59e8e6fd0
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go
@@ -0,0 +1,136 @@
+package parallel_support
+
+import (
+ "net/rpc"
+ "time"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type rpcClient struct {
+ serverHost string
+ client *rpc.Client
+}
+
+func newRPCClient(serverHost string) *rpcClient {
+ return &rpcClient{
+ serverHost: serverHost,
+ }
+}
+
+func (client *rpcClient) Connect() bool {
+ var err error
+ if client.client != nil {
+ return true
+ }
+ client.client, err = rpc.DialHTTPPath("tcp", client.serverHost, "/")
+ if err != nil {
+ client.client = nil
+ return false
+ }
+ return true
+}
+
+func (client *rpcClient) Close() error {
+ return client.client.Close()
+}
+
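+// poll repeatedly invokes the named RPC until it stops returning ErrorEarly,
+// sleeping POLLING_INTERVAL between attempts; ErrorGone and ErrorFailed are
+// matched by message and returned as the corresponding sentinel values.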
+func (client *rpcClient) poll(method string, data interface{}) error {
+ for {
+ err := client.client.Call(method, voidSender, data)
+ if err == nil {
+ return nil
+ }
+ switch err.Error() {
+ case ErrorEarly.Error():
+ time.Sleep(POLLING_INTERVAL)
+ case ErrorGone.Error():
+ return ErrorGone
+ case ErrorFailed.Error():
+ return ErrorFailed
+ default:
+ return err
+ }
+ }
+}
+
+func (client *rpcClient) PostSuiteWillBegin(report types.Report) error {
+ return client.client.Call("Server.SpecSuiteWillBegin", report, voidReceiver)
+}
+
+func (client *rpcClient) PostDidRun(report types.SpecReport) error {
+ return client.client.Call("Server.DidRun", report, voidReceiver)
+}
+
+func (client *rpcClient) PostSuiteDidEnd(report types.Report) error {
+ return client.client.Call("Server.SpecSuiteDidEnd", report, voidReceiver)
+}
+
+func (client *rpcClient) Write(p []byte) (int, error) {
+ var n int
+ err := client.client.Call("Server.EmitOutput", p, &n)
+ return n, err
+}
+
+func (client *rpcClient) PostEmitProgressReport(report types.ProgressReport) error {
+ return client.client.Call("Server.EmitProgressReport", report, voidReceiver)
+}
+
+func (client *rpcClient) PostReportBeforeSuiteCompleted(state types.SpecState) error {
+ return client.client.Call("Server.ReportBeforeSuiteCompleted", state, voidReceiver)
+}
+
+func (client *rpcClient) BlockUntilReportBeforeSuiteCompleted() (types.SpecState, error) {
+ var state types.SpecState
+ err := client.poll("Server.ReportBeforeSuiteState", &state)
+ if err == ErrorGone {
+ return types.SpecStateFailed, nil
+ }
+ return state, err
+}
+
+func (client *rpcClient) PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error {
+ beforeSuiteState := BeforeSuiteState{
+ State: state,
+ Data: data,
+ }
+ return client.client.Call("Server.BeforeSuiteCompleted", beforeSuiteState, voidReceiver)
+}
+
+func (client *rpcClient) BlockUntilSynchronizedBeforeSuiteData() (types.SpecState, []byte, error) {
+ var beforeSuiteState BeforeSuiteState
+ err := client.poll("Server.BeforeSuiteState", &beforeSuiteState)
+ if err == ErrorGone {
+ return types.SpecStateInvalid, nil, types.GinkgoErrors.SynchronizedBeforeSuiteDisappearedOnProc1()
+ }
+ return beforeSuiteState.State, beforeSuiteState.Data, err
+}
+
+func (client *rpcClient) BlockUntilNonprimaryProcsHaveFinished() error {
+ return client.poll("Server.HaveNonprimaryProcsFinished", voidReceiver)
+}
+
+func (client *rpcClient) BlockUntilAggregatedNonprimaryProcsReport() (types.Report, error) {
+ var report types.Report
+ err := client.poll("Server.AggregatedNonprimaryProcsReport", &report)
+ if err == ErrorGone {
+ return types.Report{}, types.GinkgoErrors.AggregatedReportUnavailableDueToNodeDisappearing()
+ }
+ return report, err
+}
+
+func (client *rpcClient) FetchNextCounter() (int, error) {
+ var counter int
+ err := client.client.Call("Server.Counter", voidSender, &counter)
+ return counter, err
+}
+
+func (client *rpcClient) PostAbort() error {
+ return client.client.Call("Server.Abort", voidSender, voidReceiver)
+}
+
+func (client *rpcClient) ShouldAbort() bool {
+ var shouldAbort bool
+ client.client.Call("Server.ShouldAbort", voidSender, &shouldAbort)
+ return shouldAbort
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go
new file mode 100644
index 000000000..2620fd562
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go
@@ -0,0 +1,75 @@
+/*
+
+The parallel_support package provides the pieces that allow Ginkgo test suites to report to remote listeners.
+This is used, primarily, to enable streaming parallel test output but has, in principle, broader applications (e.g. streaming test output to a browser).
+
+*/
+
+package parallel_support
+
+import (
+ "io"
+ "net"
+ "net/http"
+ "net/rpc"
+
+ "github.com/onsi/ginkgo/v2/reporters"
+)
+
+/*
+RPCServer spins up on an automatically selected port and listens for communication from the forwarding reporter.
+It then forwards that communication to attached reporters.
+*/
+type RPCServer struct {
+ listener net.Listener
+ handler *ServerHandler
+}
+
+// Create a new server, automatically selecting a port
+func newRPCServer(parallelTotal int, reporter reporters.Reporter) (*RPCServer, error) {
+ listener, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ return nil, err
+ }
+ return &RPCServer{
+ listener: listener,
+ handler: newServerHandler(parallelTotal, reporter),
+ }, nil
+}
+
+// Start the server. You don't need to `go s.Start()`, just `s.Start()`
+func (server *RPCServer) Start() {
+ rpcServer := rpc.NewServer()
+ rpcServer.RegisterName("Server", server.handler) //register the handler's methods as the server
+
+ httpServer := &http.Server{}
+ httpServer.Handler = rpcServer
+
+ go httpServer.Serve(server.listener)
+}
+
+// Stop the server
+func (server *RPCServer) Close() {
+ server.listener.Close()
+}
+
+// The address at which the server can be reached. Pass this into the `ForwardingReporter`.
+func (server *RPCServer) Address() string {
+ return server.listener.Addr().String()
+}
+
+func (server *RPCServer) GetSuiteDone() chan interface{} {
+ return server.handler.done
+}
+
+func (server *RPCServer) GetOutputDestination() io.Writer {
+ return server.handler.outputDestination
+}
+
+func (server *RPCServer) SetOutputDestination(w io.Writer) {
+ server.handler.outputDestination = w
+}
+
+func (server *RPCServer) RegisterAlive(node int, alive func() bool) {
+ server.handler.registerAlive(node, alive)
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go
new file mode 100644
index 000000000..a6d98793e
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go
@@ -0,0 +1,234 @@
+package parallel_support
+
+import (
+ "io"
+ "os"
+ "sync"
+
+ "github.com/onsi/ginkgo/v2/reporters"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type Void struct{}
+
+var voidReceiver *Void = &Void{}
+var voidSender Void
+
+// ServerHandler is an RPC-compatible handler that is shared between the http server and the rpc server.
+// It handles all the business logic to avoid duplication between the two servers.
+
+type ServerHandler struct {
+ done chan interface{}
+ outputDestination io.Writer
+ reporter reporters.Reporter
+ alives []func() bool
+ lock *sync.Mutex
+ beforeSuiteState BeforeSuiteState
+ reportBeforeSuiteState types.SpecState
+ parallelTotal int
+ counter int
+ counterLock *sync.Mutex
+ shouldAbort bool
+
+ numSuiteDidBegins int
+ numSuiteDidEnds int
+ aggregatedReport types.Report
+ reportHoldingArea []types.SpecReport
+}
+
+func newServerHandler(parallelTotal int, reporter reporters.Reporter) *ServerHandler {
+ return &ServerHandler{
+ reporter: reporter,
+ lock: &sync.Mutex{},
+ counterLock: &sync.Mutex{},
+ alives: make([]func() bool, parallelTotal),
+ beforeSuiteState: BeforeSuiteState{Data: nil, State: types.SpecStateInvalid},
+
+ parallelTotal: parallelTotal,
+ outputDestination: os.Stdout,
+ done: make(chan interface{}),
+ }
+}
+
+func (handler *ServerHandler) SpecSuiteWillBegin(report types.Report, _ *Void) error {
+ handler.lock.Lock()
+ defer handler.lock.Unlock()
+
+ handler.numSuiteDidBegins += 1
+
+ // all summaries are identical, so it's fine to simply emit the last one of these
+ if handler.numSuiteDidBegins == handler.parallelTotal {
+ handler.reporter.SuiteWillBegin(report)
+
+ for _, summary := range handler.reportHoldingArea {
+ handler.reporter.WillRun(summary)
+ handler.reporter.DidRun(summary)
+ }
+
+ handler.reportHoldingArea = nil
+ }
+
+ return nil
+}
+
+func (handler *ServerHandler) DidRun(report types.SpecReport, _ *Void) error {
+ handler.lock.Lock()
+ defer handler.lock.Unlock()
+
+ if handler.numSuiteDidBegins == handler.parallelTotal {
+ handler.reporter.WillRun(report)
+ handler.reporter.DidRun(report)
+ } else {
+ handler.reportHoldingArea = append(handler.reportHoldingArea, report)
+ }
+
+ return nil
+}
+
+func (handler *ServerHandler) SpecSuiteDidEnd(report types.Report, _ *Void) error {
+ handler.lock.Lock()
+ defer handler.lock.Unlock()
+
+ handler.numSuiteDidEnds += 1
+ if handler.numSuiteDidEnds == 1 {
+ handler.aggregatedReport = report
+ } else {
+ handler.aggregatedReport = handler.aggregatedReport.Add(report)
+ }
+
+ if handler.numSuiteDidEnds == handler.parallelTotal {
+ handler.reporter.SuiteDidEnd(handler.aggregatedReport)
+ close(handler.done)
+ }
+
+ return nil
+}
+
+func (handler *ServerHandler) EmitOutput(output []byte, n *int) error {
+ var err error
+ *n, err = handler.outputDestination.Write(output)
+ return err
+}
+
+func (handler *ServerHandler) EmitProgressReport(report types.ProgressReport, _ *Void) error {
+ handler.lock.Lock()
+ defer handler.lock.Unlock()
+ handler.reporter.EmitProgressReport(report)
+ return nil
+}
+
+func (handler *ServerHandler) registerAlive(proc int, alive func() bool) {
+ handler.lock.Lock()
+ defer handler.lock.Unlock()
+ handler.alives[proc-1] = alive
+}
+
+func (handler *ServerHandler) procIsAlive(proc int) bool {
+ handler.lock.Lock()
+ defer handler.lock.Unlock()
+ alive := handler.alives[proc-1]
+ if alive == nil {
+ return true
+ }
+ return alive()
+}
+
+func (handler *ServerHandler) haveNonprimaryProcsFinished() bool {
+ for i := 2; i <= handler.parallelTotal; i++ {
+ if handler.procIsAlive(i) {
+ return false
+ }
+ }
+ return true
+}
+
+func (handler *ServerHandler) ReportBeforeSuiteCompleted(reportBeforeSuiteState types.SpecState, _ *Void) error {
+ handler.lock.Lock()
+ defer handler.lock.Unlock()
+ handler.reportBeforeSuiteState = reportBeforeSuiteState
+
+ return nil
+}
+
+func (handler *ServerHandler) ReportBeforeSuiteState(_ Void, reportBeforeSuiteState *types.SpecState) error {
+ proc1IsAlive := handler.procIsAlive(1)
+ handler.lock.Lock()
+ defer handler.lock.Unlock()
+ if handler.reportBeforeSuiteState == types.SpecStateInvalid {
+ if proc1IsAlive {
+ return ErrorEarly
+ } else {
+ return ErrorGone
+ }
+ }
+ *reportBeforeSuiteState = handler.reportBeforeSuiteState
+ return nil
+}
+
+func (handler *ServerHandler) BeforeSuiteCompleted(beforeSuiteState BeforeSuiteState, _ *Void) error {
+ handler.lock.Lock()
+ defer handler.lock.Unlock()
+ handler.beforeSuiteState = beforeSuiteState
+
+ return nil
+}
+
+func (handler *ServerHandler) BeforeSuiteState(_ Void, beforeSuiteState *BeforeSuiteState) error {
+ proc1IsAlive := handler.procIsAlive(1)
+ handler.lock.Lock()
+ defer handler.lock.Unlock()
+ if handler.beforeSuiteState.State == types.SpecStateInvalid {
+ if proc1IsAlive {
+ return ErrorEarly
+ } else {
+ return ErrorGone
+ }
+ }
+ *beforeSuiteState = handler.beforeSuiteState
+ return nil
+}
+
+func (handler *ServerHandler) HaveNonprimaryProcsFinished(_ Void, _ *Void) error {
+ if handler.haveNonprimaryProcsFinished() {
+ return nil
+ } else {
+ return ErrorEarly
+ }
+}
+
+func (handler *ServerHandler) AggregatedNonprimaryProcsReport(_ Void, report *types.Report) error {
+ if handler.haveNonprimaryProcsFinished() {
+ handler.lock.Lock()
+ defer handler.lock.Unlock()
+ if handler.numSuiteDidEnds == handler.parallelTotal-1 {
+ *report = handler.aggregatedReport
+ return nil
+ } else {
+ return ErrorGone
+ }
+ } else {
+ return ErrorEarly
+ }
+}
+
+func (handler *ServerHandler) Counter(_ Void, counter *int) error {
+ handler.counterLock.Lock()
+ defer handler.counterLock.Unlock()
+ *counter = handler.counter
+ handler.counter++
+ return nil
+}
+
+func (handler *ServerHandler) Abort(_ Void, _ *Void) error {
+ handler.lock.Lock()
+ defer handler.lock.Unlock()
+ handler.shouldAbort = true
+ return nil
+}
+
+func (handler *ServerHandler) ShouldAbort(_ Void, shouldAbort *bool) error {
+ handler.lock.Lock()
+ defer handler.lock.Unlock()
+ *shouldAbort = handler.shouldAbort
+ return nil
+}
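
The methods above follow net/rpc's `func(args T, reply *U) error` shape, which is what lets a single ServerHandler back both the HTTP and the RPC flavor of the parallel-support server. A minimal standalone sketch of that convention, reusing the counter pattern (the names here are illustrative, not the vendored types):

```go
package main

import (
	"fmt"
	"sync"
)

// Void mirrors the empty placeholder used when a method needs no argument or reply.
type Void struct{}

// counterHandler is a reduced stand-in: each method takes an input value and
// writes its result through a reply pointer, the shape net/rpc expects.
type counterHandler struct {
	mu      sync.Mutex
	counter int
}

// Counter hands out a monotonically increasing index; parallel procs poll it
// to claim the next group of specs to run.
func (h *counterHandler) Counter(_ Void, counter *int) error {
	h.mu.Lock()
	defer h.mu.Unlock()
	*counter = h.counter
	h.counter++
	return nil
}

func main() {
	h := &counterHandler{}
	for proc := 1; proc <= 3; proc++ {
		var idx int
		if err := h.Counter(Void{}, &idx); err != nil {
			panic(err)
		}
		fmt.Printf("proc %d claimed index %d\n", proc, idx)
	}
}
```

Note how the vendored handler keeps the counter behind its own counterLock, separate from the reporting lock, so index hand-out never contends with report aggregation.
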
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go
new file mode 100644
index 000000000..11269cf1f
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go
@@ -0,0 +1,284 @@
+package internal
+
+import (
+ "bufio"
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "os"
+ "os/signal"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+var _SOURCE_CACHE = map[string][]string{}
+
+type ProgressSignalRegistrar func(func()) context.CancelFunc
+
+func RegisterForProgressSignal(handler func()) context.CancelFunc {
+ signalChannel := make(chan os.Signal, 1)
+ if len(PROGRESS_SIGNALS) > 0 {
+ signal.Notify(signalChannel, PROGRESS_SIGNALS...)
+ }
+ ctx, cancel := context.WithCancel(context.Background())
+ go func() {
+ for {
+ select {
+ case <-signalChannel:
+ handler()
+ case <-ctx.Done():
+ signal.Stop(signalChannel)
+ return
+ }
+ }
+ }()
+
+ return cancel
+}
+
+type ProgressStepCursor struct {
+ Text string
+ CodeLocation types.CodeLocation
+ StartTime time.Time
+}
+
+func NewProgressReport(isRunningInParallel bool, report types.SpecReport, currentNode Node, currentNodeStartTime time.Time, currentStep types.SpecEvent, gwOutput string, timelineLocation types.TimelineLocation, additionalReports []string, sourceRoots []string, includeAll bool) (types.ProgressReport, error) {
+ pr := types.ProgressReport{
+ ParallelProcess: report.ParallelProcess,
+ RunningInParallel: isRunningInParallel,
+ ContainerHierarchyTexts: report.ContainerHierarchyTexts,
+ LeafNodeText: report.LeafNodeText,
+ LeafNodeLocation: report.LeafNodeLocation,
+ SpecStartTime: report.StartTime,
+
+ CurrentNodeType: currentNode.NodeType,
+ CurrentNodeText: currentNode.Text,
+ CurrentNodeLocation: currentNode.CodeLocation,
+ CurrentNodeStartTime: currentNodeStartTime,
+
+ CurrentStepText: currentStep.Message,
+ CurrentStepLocation: currentStep.CodeLocation,
+ CurrentStepStartTime: currentStep.TimelineLocation.Time,
+
+ AdditionalReports: additionalReports,
+
+ CapturedGinkgoWriterOutput: gwOutput,
+ TimelineLocation: timelineLocation,
+ }
+
+ goroutines, err := extractRunningGoroutines()
+ if err != nil {
+ return pr, err
+ }
+ pr.Goroutines = goroutines
+
+ // now we want to try to find goroutines of interest. these will be goroutines that have any function calls with code in packagesOfInterest:
+ packagesOfInterest := map[string]bool{}
+ packageFromFilename := func(filename string) string {
+ return filepath.Dir(filename)
+ }
+ addPackageFor := func(filename string) {
+ if filename != "" {
+ packagesOfInterest[packageFromFilename(filename)] = true
+ }
+ }
+ isPackageOfInterest := func(filename string) bool {
+ stackPackage := packageFromFilename(filename)
+ for packageOfInterest := range packagesOfInterest {
+ if strings.HasPrefix(stackPackage, packageOfInterest) {
+ return true
+ }
+ }
+ return false
+ }
+ for _, location := range report.ContainerHierarchyLocations {
+ addPackageFor(location.FileName)
+ }
+ addPackageFor(report.LeafNodeLocation.FileName)
+ addPackageFor(currentNode.CodeLocation.FileName)
+ addPackageFor(currentStep.CodeLocation.FileName)
+
+ //First, we find the SpecGoroutine - this will be the goroutine that includes `runNode`
+ specGoRoutineIdx := -1
+ runNodeFunctionCallIdx := -1
+OUTER:
+ for goroutineIdx, goroutine := range pr.Goroutines {
+ for functionCallIdx, functionCall := range goroutine.Stack {
+ if strings.Contains(functionCall.Function, "ginkgo/v2/internal.(*Suite).runNode.func") {
+ specGoRoutineIdx = goroutineIdx
+ runNodeFunctionCallIdx = functionCallIdx
+ break OUTER
+ }
+ }
+ }
+
+ //Now, we find the first non-Ginkgo function call
+ if specGoRoutineIdx > -1 {
+ for runNodeFunctionCallIdx >= 0 {
+ fn := goroutines[specGoRoutineIdx].Stack[runNodeFunctionCallIdx].Function
+ file := goroutines[specGoRoutineIdx].Stack[runNodeFunctionCallIdx].Filename
+ // these are all things that could potentially happen from within ginkgo
+ if strings.Contains(fn, "ginkgo/v2/internal") || strings.Contains(fn, "reflect.Value") || strings.Contains(file, "ginkgo/table_dsl") || strings.Contains(file, "ginkgo/core_dsl") {
+ runNodeFunctionCallIdx--
+ continue
+ }
+ // found it! let's add its package of interest
+ addPackageFor(goroutines[specGoRoutineIdx].Stack[runNodeFunctionCallIdx].Filename)
+ break
+ }
+ }
+
+ ginkgoEntryPointIdx := -1
+OUTER_GINKGO_ENTRY_POINT:
+ for goroutineIdx, goroutine := range pr.Goroutines {
+ for _, functionCall := range goroutine.Stack {
+ if strings.Contains(functionCall.Function, "ginkgo/v2.RunSpecs") {
+ ginkgoEntryPointIdx = goroutineIdx
+ break OUTER_GINKGO_ENTRY_POINT
+ }
+ }
+ }
+
+ // Now we go through all goroutines and highlight any lines with packages in `packagesOfInterest`
+ // Any goroutines with highlighted lines end up in the HighlightGoRoutines
+ for goroutineIdx, goroutine := range pr.Goroutines {
+ if goroutineIdx == ginkgoEntryPointIdx {
+ continue
+ }
+ if goroutineIdx == specGoRoutineIdx {
+ pr.Goroutines[goroutineIdx].IsSpecGoroutine = true
+ }
+ for functionCallIdx, functionCall := range goroutine.Stack {
+ if isPackageOfInterest(functionCall.Filename) {
+ goroutine.Stack[functionCallIdx].Highlight = true
+ goroutine.Stack[functionCallIdx].Source, goroutine.Stack[functionCallIdx].SourceHighlight = fetchSource(functionCall.Filename, functionCall.Line, 2, sourceRoots)
+ }
+ }
+ }
+
+ if !includeAll {
+ goroutines := []types.Goroutine{pr.SpecGoroutine()}
+ goroutines = append(goroutines, pr.HighlightedGoroutines()...)
+ pr.Goroutines = goroutines
+ }
+
+ return pr, nil
+}
+
+func extractRunningGoroutines() ([]types.Goroutine, error) {
+ var stack []byte
+ for size := 64 * 1024; ; size *= 2 {
+ stack = make([]byte, size)
+ if n := runtime.Stack(stack, true); n < size {
+ stack = stack[:n]
+ break
+ }
+ }
+ r := bufio.NewReader(bytes.NewReader(stack))
+ out := []types.Goroutine{}
+ idx := -1
+ for {
+ line, err := r.ReadString('\n')
+ if err == io.EOF {
+ break
+ }
+
+ line = strings.TrimSuffix(line, "\n")
+
+ //skip blank lines
+ if line == "" {
+ continue
+ }
+
+ //parse headers for new goroutine frames
+ if strings.HasPrefix(line, "goroutine") {
+ out = append(out, types.Goroutine{})
+ idx = len(out) - 1
+
+ line = strings.TrimPrefix(line, "goroutine ")
+ line = strings.TrimSuffix(line, ":")
+ fields := strings.SplitN(line, " ", 2)
+ if len(fields) != 2 {
+ return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid goroutine frame header: %s", line))
+ }
+ out[idx].ID, err = strconv.ParseUint(fields[0], 10, 64)
+ if err != nil {
+ return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid goroutine ID: %s", fields[1]))
+ }
+
+ out[idx].State = strings.TrimSuffix(strings.TrimPrefix(fields[1], "["), "]")
+ continue
+ }
+
+ //if we are here we must be at a function call entry in the stack
+ functionCall := types.FunctionCall{
+ Function: strings.TrimPrefix(line, "created by "), // no need to track 'created by'
+ }
+
+ line, err = r.ReadString('\n')
+ line = strings.TrimSuffix(line, "\n")
+ if err == io.EOF {
+ return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid function call: %s -- missing file name and line number", functionCall.Function))
+ }
+ line = strings.TrimLeft(line, " \t")
+ delimiterIdx := strings.LastIndex(line, ":")
+ if delimiterIdx == -1 {
+ return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid filename and line number: %s", line))
+ }
+ functionCall.Filename = line[:delimiterIdx]
+ line = strings.Split(line[delimiterIdx+1:], " ")[0]
+ lineNumber, err := strconv.ParseInt(line, 10, 64)
+ if err != nil {
+ return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid function call line number: %s\n%s", line, err.Error()))
+ }
+ functionCall.Line = int(lineNumber)
+ out[idx].Stack = append(out[idx].Stack, functionCall)
+ }
+
+ return out, nil
+}
+
+func fetchSource(filename string, lineNumber int, span int, configuredSourceRoots []string) ([]string, int) {
+ if filename == "" {
+ return []string{}, 0
+ }
+
+ var lines []string
+ var ok bool
+ if lines, ok = _SOURCE_CACHE[filename]; !ok {
+ sourceRoots := []string{""}
+ sourceRoots = append(sourceRoots, configuredSourceRoots...)
+ var data []byte
+ var err error
+ var found bool
+ for _, root := range sourceRoots {
+ data, err = os.ReadFile(filepath.Join(root, filename))
+ if err == nil {
+ found = true
+ break
+ }
+ }
+ if !found {
+ return []string{}, 0
+ }
+ lines = strings.Split(string(data), "\n")
+ _SOURCE_CACHE[filename] = lines
+ }
+
+ startIndex := lineNumber - span - 1
+ endIndex := startIndex + span + span + 1
+ if startIndex < 0 {
+ startIndex = 0
+ }
+ if endIndex > len(lines) {
+ endIndex = len(lines)
+ }
+ highlightIndex := lineNumber - 1 - startIndex
+ return lines[startIndex:endIndex], highlightIndex
+}
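
`fetchSource` caches each file's lines in `_SOURCE_CACHE` and returns a window of up to `2*span + 1` lines around the target line, clamped to the file, plus the index of the highlighted line within that window. A quick sketch of the same window arithmetic in isolation (standalone, assuming the 1-based line numbering used above):

```go
package main

import "fmt"

// sourceWindow mirrors fetchSource's clamping arithmetic: lineNumber is
// 1-based; the returned start/end are 0-based slice bounds into the file.
func sourceWindow(lineNumber, span, totalLines int) (start, end, highlight int) {
	start = lineNumber - span - 1
	end = start + 2*span + 1 // computed before clamping, as in fetchSource
	if start < 0 {
		start = 0
	}
	if end > totalLines {
		end = totalLines
	}
	highlight = lineNumber - 1 - start
	return start, end, highlight
}

func main() {
	// Line 2 of a 10-line file with span 2: the window clamps at the top.
	start, end, highlight := sourceWindow(2, 2, 10)
	fmt.Println(start, end, highlight) // 0 4 1
}
```
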
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_bsd.go b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_bsd.go
new file mode 100644
index 000000000..61e0ed306
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_bsd.go
@@ -0,0 +1,11 @@
+//go:build freebsd || openbsd || netbsd || darwin || dragonfly
+// +build freebsd openbsd netbsd darwin dragonfly
+
+package internal
+
+import (
+ "os"
+ "syscall"
+)
+
+var PROGRESS_SIGNALS = []os.Signal{syscall.SIGINFO, syscall.SIGUSR1}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_unix.go b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_unix.go
new file mode 100644
index 000000000..ad30de459
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_unix.go
@@ -0,0 +1,11 @@
+//go:build linux || solaris
+// +build linux solaris
+
+package internal
+
+import (
+ "os"
+ "syscall"
+)
+
+var PROGRESS_SIGNALS = []os.Signal{syscall.SIGUSR1}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_wasm.go b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_wasm.go
new file mode 100644
index 000000000..8c53fe0ad
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_wasm.go
@@ -0,0 +1,10 @@
+//go:build wasm
+
+package internal
+
+import (
+ "os"
+ "syscall"
+)
+
+var PROGRESS_SIGNALS = []os.Signal{syscall.SIGUSR1}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_win.go b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_win.go
new file mode 100644
index 000000000..0eca2516a
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_win.go
@@ -0,0 +1,8 @@
+//go:build windows
+// +build windows
+
+package internal
+
+import "os"
+
+var PROGRESS_SIGNALS = []os.Signal{}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/progress_reporter_manager.go b/vendor/github.com/onsi/ginkgo/v2/internal/progress_reporter_manager.go
new file mode 100644
index 000000000..2c6e260f7
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/progress_reporter_manager.go
@@ -0,0 +1,79 @@
+package internal
+
+import (
+ "context"
+ "sort"
+ "strings"
+ "sync"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type ProgressReporterManager struct {
+ lock *sync.Mutex
+ progressReporters map[int]func() string
+ prCounter int
+}
+
+func NewProgressReporterManager() *ProgressReporterManager {
+ return &ProgressReporterManager{
+ progressReporters: map[int]func() string{},
+ lock: &sync.Mutex{},
+ }
+}
+
+func (prm *ProgressReporterManager) AttachProgressReporter(reporter func() string) func() {
+ prm.lock.Lock()
+ defer prm.lock.Unlock()
+ prm.prCounter += 1
+ prCounter := prm.prCounter
+ prm.progressReporters[prCounter] = reporter
+
+ return func() {
+ prm.lock.Lock()
+ defer prm.lock.Unlock()
+ delete(prm.progressReporters, prCounter)
+ }
+}
+
+func (prm *ProgressReporterManager) QueryProgressReporters(ctx context.Context, failer *Failer) []string {
+ prm.lock.Lock()
+ keys := []int{}
+ for key := range prm.progressReporters {
+ keys = append(keys, key)
+ }
+ sort.Ints(keys)
+ reporters := []func() string{}
+ for _, key := range keys {
+ reporters = append(reporters, prm.progressReporters[key])
+ }
+ prm.lock.Unlock()
+
+ if len(reporters) == 0 {
+ return nil
+ }
+ out := []string{}
+ for _, reporter := range reporters {
+ reportC := make(chan string, 1)
+ go func() {
+ defer func() {
+ e := recover()
+ if e != nil {
+ failer.Panic(types.NewCodeLocationWithStackTrace(1), e)
+ reportC <- "failed to query attached progress reporter"
+ }
+ }()
+ reportC <- reporter()
+ }()
+ var report string
+ select {
+ case report = <-reportC:
+ case <-ctx.Done():
+ return out
+ }
+ if strings.TrimSpace(report) != "" {
+ out = append(out, report)
+ }
+ }
+ return out
+}
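
`AttachProgressReporter` hands back a detach closure keyed by a private counter, so callers can unregister without tracking slots, and `QueryProgressReporters` later snapshots the reporters in key order before invoking each one (in a goroutine, with panic recovery and a context bail-out). A standalone sketch of the register/detach half of that pattern (the type below is illustrative):

```go
package main

import (
	"fmt"
	"sync"
)

// registry is a reduced stand-in for ProgressReporterManager.
type registry struct {
	mu        sync.Mutex
	reporters map[int]func() string
	counter   int
}

// attach stores the reporter under a fresh key and returns a closure that
// deletes exactly that entry, so detaching is order-independent.
func (r *registry) attach(reporter func() string) (detach func()) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.counter++
	key := r.counter
	r.reporters[key] = reporter
	return func() {
		r.mu.Lock()
		defer r.mu.Unlock()
		delete(r.reporters, key)
	}
}

func main() {
	r := &registry{reporters: map[int]func() string{}}
	detach := r.attach(func() string { return "still copying fixtures" })
	fmt.Println(len(r.reporters)) // 1
	detach()
	fmt.Println(len(r.reporters)) // 0
}
```
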
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go b/vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go
new file mode 100644
index 000000000..cc351a39b
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go
@@ -0,0 +1,39 @@
+package internal
+
+import (
+ "time"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type ReportEntry = types.ReportEntry
+
+func NewReportEntry(name string, cl types.CodeLocation, args ...interface{}) (ReportEntry, error) {
+ out := ReportEntry{
+ Visibility: types.ReportEntryVisibilityAlways,
+ Name: name,
+ Location: cl,
+ Time: time.Now(),
+ }
+ var didSetValue = false
+ for _, arg := range args {
+ switch x := arg.(type) {
+ case types.ReportEntryVisibility:
+ out.Visibility = x
+ case types.CodeLocation:
+ out.Location = x
+ case Offset:
+ out.Location = types.NewCodeLocation(2 + int(x))
+ case time.Time:
+ out.Time = x
+ default:
+ if didSetValue {
+ return ReportEntry{}, types.GinkgoErrors.TooManyReportEntryValues(out.Location, arg)
+ }
+ out.Value = types.WrapEntryValue(arg)
+ didSetValue = true
+ }
+ }
+
+ return out, nil
+}
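
`NewReportEntry` decodes its trailing variadic arguments by type rather than by position: a visibility, code location, offset, or timestamp each overrides a default, and at most one remaining argument becomes the entry's value. A reduced sketch of the same decode-by-type option pattern (simplified types, not the vendored API):

```go
package main

import (
	"fmt"
	"time"
)

// entry is a reduced stand-in for types.ReportEntry.
type entry struct {
	name  string
	when  time.Time
	value interface{}
}

// newEntry accepts options in any order: a time.Time overrides the default
// timestamp, and at most one other argument becomes the payload.
func newEntry(name string, args ...interface{}) (entry, error) {
	out := entry{name: name, when: time.Now()}
	haveValue := false
	for _, arg := range args {
		switch x := arg.(type) {
		case time.Time:
			out.when = x
		default:
			if haveValue {
				return entry{}, fmt.Errorf("too many values for entry %q", name)
			}
			out.value = x
			haveValue = true
		}
	}
	return out, nil
}

func main() {
	e, err := newEntry("rows written", 1234, time.Unix(0, 0))
	fmt.Println(e.value, e.when.Unix(), err) // 1234 0 <nil>
}
```
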
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/spec.go b/vendor/github.com/onsi/ginkgo/v2/internal/spec.go
new file mode 100644
index 000000000..7c4ee5bb7
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/spec.go
@@ -0,0 +1,87 @@
+package internal
+
+import (
+ "strings"
+ "time"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type Spec struct {
+ Nodes Nodes
+ Skip bool
+}
+
+func (s Spec) SubjectID() uint {
+ return s.Nodes.FirstNodeWithType(types.NodeTypeIt).ID
+}
+
+func (s Spec) Text() string {
+ texts := []string{}
+ for i := range s.Nodes {
+ if s.Nodes[i].Text != "" {
+ texts = append(texts, s.Nodes[i].Text)
+ }
+ }
+ return strings.Join(texts, " ")
+}
+
+func (s Spec) FirstNodeWithType(nodeTypes types.NodeType) Node {
+ return s.Nodes.FirstNodeWithType(nodeTypes)
+}
+
+func (s Spec) FlakeAttempts() int {
+ flakeAttempts := 0
+ for i := range s.Nodes {
+ if s.Nodes[i].FlakeAttempts > 0 {
+ flakeAttempts = s.Nodes[i].FlakeAttempts
+ }
+ }
+
+ return flakeAttempts
+}
+
+func (s Spec) MustPassRepeatedly() int {
+ mustPassRepeatedly := 0
+ for i := range s.Nodes {
+ if s.Nodes[i].MustPassRepeatedly > 0 {
+ mustPassRepeatedly = s.Nodes[i].MustPassRepeatedly
+ }
+ }
+
+ return mustPassRepeatedly
+}
+
+func (s Spec) SpecTimeout() time.Duration {
+ return s.FirstNodeWithType(types.NodeTypeIt).SpecTimeout
+}
+
+type Specs []Spec
+
+func (s Specs) HasAnySpecsMarkedPending() bool {
+ for i := range s {
+ if s[i].Nodes.HasNodeMarkedPending() {
+ return true
+ }
+ }
+
+ return false
+}
+
+func (s Specs) CountWithoutSkip() int {
+ n := 0
+ for i := range s {
+ if !s[i].Skip {
+ n += 1
+ }
+ }
+ return n
+}
+
+func (s Specs) AtIndices(indices SpecIndices) Specs {
+ out := make(Specs, len(indices))
+ for i, idx := range indices {
+ out[i] = s[idx]
+ }
+ return out
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go b/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go
new file mode 100644
index 000000000..2d2ea2fc3
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go
@@ -0,0 +1,47 @@
+package internal
+
+import (
+ "context"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type SpecContext interface {
+ context.Context
+
+ SpecReport() types.SpecReport
+ AttachProgressReporter(func() string) func()
+}
+
+type specContext struct {
+ context.Context
+ *ProgressReporterManager
+
+ cancel context.CancelCauseFunc
+
+ suite *Suite
+}
+
+/*
+SpecContext includes a reference to `suite` and stores itself in its own Context under the "GINKGO_SPEC_CONTEXT" key. This allows users to create child Contexts without having down-stream consumers (e.g. Gomega) lose access to the SpecContext and its methods, and it lets extensions be built on top of Ginkgo that simply take an all-encompassing context.
+
+Note that while SpecContext is used to enforce deadlines by Ginkgo it is not configured as a context.WithDeadline. Instead, Ginkgo owns responsibility for cancelling the context when the deadline elapses.
+
+This is because Ginkgo needs finer control over when the context is canceled. Specifically, Ginkgo needs to generate a ProgressReport before it cancels the context to ensure progress is captured where the spec is currently running. The only way to avoid a race here is to manually control the cancellation.
+*/
+func NewSpecContext(suite *Suite) *specContext {
+ ctx, cancel := context.WithCancelCause(context.Background())
+ sc := &specContext{
+ cancel: cancel,
+ suite: suite,
+ ProgressReporterManager: NewProgressReporterManager(),
+ }
+ ctx = context.WithValue(ctx, "GINKGO_SPEC_CONTEXT", sc) //yes, yes, the go docs say don't use a string for a key... but we'd rather avoid a circular dependency between Gomega and Ginkgo
+ sc.Context = ctx //thank goodness for garbage collectors that can handle circular dependencies
+
+ return sc
+}
+
+func (sc *specContext) SpecReport() types.SpecReport {
+ return sc.suite.CurrentSpecReport()
+}
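
Because the specContext registers itself under the "GINKGO_SPEC_CONTEXT" key of its own Context, any context derived from it still resolves that key, which is how a downstream consumer handed a plain context.Context can recover the progress-reporting methods. A hedged sketch of that lookup (the fake type and interface below are illustrative):

```go
package main

import (
	"context"
	"fmt"
)

// reporterAttacher is the capability a consumer would assert for; the real
// SpecContext exposes AttachProgressReporter with this shape.
type reporterAttacher interface {
	AttachProgressReporter(func() string) func()
}

// fakeSpecContext stands in for the vendored specContext: it embeds a
// Context and registers itself as a value inside that same Context.
type fakeSpecContext struct {
	context.Context
}

func (f *fakeSpecContext) AttachProgressReporter(func() string) func() { return func() {} }

func main() {
	sc := &fakeSpecContext{}
	// A string key draws a vet warning, but it mirrors what the vendored code does.
	sc.Context = context.WithValue(context.Background(), "GINKGO_SPEC_CONTEXT", sc)

	// A consumer handed any derived context can still dig the value back out.
	child, cancel := context.WithCancel(sc)
	defer cancel()
	if v, ok := child.Value("GINKGO_SPEC_CONTEXT").(reporterAttacher); ok {
		detach := v.AttachProgressReporter(func() string { return "busy" })
		defer detach()
		fmt.Println("recovered the spec context from a child context")
	}
}
```
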
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/spec_patch.go b/vendor/github.com/onsi/ginkgo/v2/internal/spec_patch.go
new file mode 100644
index 000000000..2d0bcc914
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/spec_patch.go
@@ -0,0 +1,22 @@
+package internal
+
+import (
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+func (s Spec) CodeLocations() []types.CodeLocation {
+ return s.Nodes.CodeLocations()
+}
+
+func (s Spec) AppendText(text string) {
+ s.Nodes[len(s.Nodes)-1].Text += text
+}
+
+func (s Spec) Labels() []string {
+ var labels []string
+ for _, n := range s.Nodes {
+ labels = append(labels, n.Labels...)
+ }
+
+ return labels
+}
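
`AppendText` above mutates through a value receiver, which works only because the copied Spec shares the same backing array for its Nodes slice: the element write through the copy is visible to the caller. A standalone illustration of that slice aliasing (simplified types; the appended tag is just an example):

```go
package main

import "fmt"

type node struct{ Text string }

type spec struct{ Nodes []node }

// appendText has a value receiver, but the copied slice header still points
// at the caller's backing array, so the element write is shared.
func (s spec) appendText(text string) {
	s.Nodes[len(s.Nodes)-1].Text += text
}

func main() {
	s := spec{Nodes: []node{{Text: "should reply"}}}
	s.appendText(" [Conformance]")
	fmt.Println(s.Nodes[0].Text) // should reply [Conformance]
}
```
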
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/suite.go b/vendor/github.com/onsi/ginkgo/v2/internal/suite.go
new file mode 100644
index 000000000..12e50b8a9
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/suite.go
@@ -0,0 +1,1053 @@
+package internal
+
+import (
+ "context"
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/onsi/ginkgo/v2/internal/interrupt_handler"
+ "github.com/onsi/ginkgo/v2/internal/parallel_support"
+ "github.com/onsi/ginkgo/v2/reporters"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type Phase uint
+
+const (
+ PhaseBuildTopLevel Phase = iota
+ PhaseBuildTree
+ PhaseRun
+)
+
+var PROGRESS_REPORTER_DEADLINE = 5 * time.Second
+
+type Suite struct {
+ tree *TreeNode
+ topLevelContainers Nodes
+
+ *ProgressReporterManager
+
+ phase Phase
+
+ suiteNodes Nodes
+ cleanupNodes Nodes
+
+ failer *Failer
+ reporter reporters.Reporter
+ writer WriterInterface
+ outputInterceptor OutputInterceptor
+ interruptHandler interrupt_handler.InterruptHandlerInterface
+ config types.SuiteConfig
+ deadline time.Time
+
+ skipAll bool
+ report types.Report
+ currentSpecReport types.SpecReport
+ currentNode Node
+ currentNodeStartTime time.Time
+
+ currentSpecContext *specContext
+
+ currentByStep types.SpecEvent
+ timelineOrder int
+
+ /*
+ We don't need to lock around all operations. Just those that *could* happen concurrently.
+
+ Suite, generally, only runs one node at a time - and so the possibility for races is small. In fact, the presence of a race usually indicates the user has launched a goroutine that has leaked past the node it was launched in.
+
+ However, there are some operations that can happen concurrently:
+
+ - AddReportEntry and CurrentSpecReport can be accessed at any point by the user - including in goroutines that outlive the node intentionally (see, e.g. #1020). They both form a self-contained read-write pair and so a lock in them is sufficient.
+ - generateProgressReport can be invoked at any point in time by an interrupt or a progress poll. Moreover, it requires access to currentSpecReport, currentNode, currentNodeStartTime, and currentByStep. To make it threadsafe we need to lock around generateProgressReport when we read those variables _and_ everywhere those variables are *written*. In general we don't need to worry about all possible field writes to these variables as what `generateProgressReport` does with these variables is fairly selective (hence the name of the lock). Specifically, we don't need to lock around state and failure message changes on `currentSpecReport` - just the setting of the variable itself.
+ */
+ selectiveLock *sync.Mutex
+
+ client parallel_support.Client
+
+ annotateFn AnnotateFunc
+}
+
+func NewSuite() *Suite {
+ return &Suite{
+ tree: &TreeNode{},
+ phase: PhaseBuildTopLevel,
+ ProgressReporterManager: NewProgressReporterManager(),
+
+ selectiveLock: &sync.Mutex{},
+ }
+}
+
+func (suite *Suite) Clone() (*Suite, error) {
+ if suite.phase != PhaseBuildTopLevel {
+ return nil, fmt.Errorf("cannot clone suite after tree has been built")
+ }
+ return &Suite{
+ tree: &TreeNode{},
+ phase: PhaseBuildTopLevel,
+ ProgressReporterManager: NewProgressReporterManager(),
+ topLevelContainers: suite.topLevelContainers.Clone(),
+ suiteNodes: suite.suiteNodes.Clone(),
+ selectiveLock: &sync.Mutex{},
+ }, nil
+}
+
+func (suite *Suite) BuildTree() error {
+ // During PhaseBuildTopLevel, the top level containers are stored in suite.topLevelContainers but not entered.
+ // We now enter PhaseBuildTree where these top level containers are entered and added to the spec tree
+ suite.phase = PhaseBuildTree
+ for _, topLevelContainer := range suite.topLevelContainers {
+ err := suite.PushNode(topLevelContainer)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (suite *Suite) Run(description string, suiteLabels Labels, suitePath string, failer *Failer, reporter reporters.Reporter, writer WriterInterface, outputInterceptor OutputInterceptor, interruptHandler interrupt_handler.InterruptHandlerInterface, client parallel_support.Client, progressSignalRegistrar ProgressSignalRegistrar, suiteConfig types.SuiteConfig) (bool, bool) {
+ if suite.phase != PhaseBuildTree {
+ panic("cannot run before building the tree = call suite.BuildTree() first")
+ }
+ ApplyNestedFocusPolicyToTree(suite.tree)
+ specs := GenerateSpecsFromTreeRoot(suite.tree)
+ if suite.annotateFn != nil {
+ for _, spec := range specs {
+ suite.annotateFn(spec.Text(), spec)
+ }
+ }
+ specs, hasProgrammaticFocus := ApplyFocusToSpecs(specs, description, suiteLabels, suiteConfig)
+
+ suite.phase = PhaseRun
+ suite.client = client
+ suite.failer = failer
+ suite.reporter = reporter
+ suite.writer = writer
+ suite.outputInterceptor = outputInterceptor
+ suite.interruptHandler = interruptHandler
+ suite.config = suiteConfig
+
+ if suite.config.Timeout > 0 {
+ suite.deadline = time.Now().Add(suite.config.Timeout)
+ }
+
+ cancelProgressHandler := progressSignalRegistrar(suite.handleProgressSignal)
+
+ success := suite.runSpecs(description, suiteLabels, suitePath, hasProgrammaticFocus, specs)
+
+ cancelProgressHandler()
+
+ return success, hasProgrammaticFocus
+}
+
+func (suite *Suite) InRunPhase() bool {
+ return suite.phase == PhaseRun
+}
+
+/*
+ Tree Construction methods
+
+ PushNode is used during PhaseBuildTopLevel and PhaseBuildTree
+*/
+
+func (suite *Suite) PushNode(node Node) error {
+ if node.NodeType.Is(types.NodeTypeCleanupInvalid | types.NodeTypeCleanupAfterEach | types.NodeTypeCleanupAfterAll | types.NodeTypeCleanupAfterSuite) {
+ return suite.pushCleanupNode(node)
+ }
+
+ if node.NodeType.Is(types.NodeTypeBeforeSuite | types.NodeTypeAfterSuite | types.NodeTypeSynchronizedBeforeSuite | types.NodeTypeSynchronizedAfterSuite | types.NodeTypeReportBeforeSuite | types.NodeTypeReportAfterSuite) {
+ return suite.pushSuiteNode(node)
+ }
+
+ if suite.phase == PhaseRun {
+ return types.GinkgoErrors.PushingNodeInRunPhase(node.NodeType, node.CodeLocation)
+ }
+
+ if node.MarkedSerial {
+ firstOrderedNode := suite.tree.AncestorNodeChain().FirstNodeMarkedOrdered()
+ if !firstOrderedNode.IsZero() && !firstOrderedNode.MarkedSerial {
+ return types.GinkgoErrors.InvalidSerialNodeInNonSerialOrderedContainer(node.CodeLocation, node.NodeType)
+ }
+ }
+
+ if node.NodeType.Is(types.NodeTypeBeforeAll | types.NodeTypeAfterAll) {
+ firstOrderedNode := suite.tree.AncestorNodeChain().FirstNodeMarkedOrdered()
+ if firstOrderedNode.IsZero() {
+ return types.GinkgoErrors.SetupNodeNotInOrderedContainer(node.CodeLocation, node.NodeType)
+ }
+ }
+
+ if node.MarkedContinueOnFailure {
+ firstOrderedNode := suite.tree.AncestorNodeChain().FirstNodeMarkedOrdered()
+ if !firstOrderedNode.IsZero() {
+ return types.GinkgoErrors.InvalidContinueOnFailureDecoration(node.CodeLocation)
+ }
+ }
+
+ if node.NodeType == types.NodeTypeContainer {
+ // During PhaseBuildTopLevel we only track the top level containers without entering them
+ // We only enter the top level container nodes during PhaseBuildTree
+ //
+ // This ensures the tree is only constructed after `go test` has called `flag.Parse()` and gives
+ // the user an opportunity to load suite configuration information in the `TestX` go test hook just before `RunSpecs`
+ // is invoked. This makes the lifecycle easier to reason about and solves issues like #693.
+ if suite.phase == PhaseBuildTopLevel {
+ suite.topLevelContainers = append(suite.topLevelContainers, node)
+ return nil
+ }
+ if suite.phase == PhaseBuildTree {
+ parentTree := suite.tree
+ suite.tree = &TreeNode{Node: node}
+ parentTree.AppendChild(suite.tree)
+ err := func() (err error) {
+ defer func() {
+ if e := recover(); e != nil {
+ err = types.GinkgoErrors.CaughtPanicDuringABuildPhase(e, node.CodeLocation)
+ }
+ }()
+ node.Body(nil)
+ return err
+ }()
+ suite.tree = parentTree
+ return err
+ }
+ } else {
+ suite.tree.AppendChild(&TreeNode{Node: node})
+ return nil
+ }
+
+ return nil
+}
+
+func (suite *Suite) pushSuiteNode(node Node) error {
+ if suite.phase == PhaseBuildTree {
+ return types.GinkgoErrors.SuiteNodeInNestedContext(node.NodeType, node.CodeLocation)
+ }
+
+ if suite.phase == PhaseRun {
+ return types.GinkgoErrors.SuiteNodeDuringRunPhase(node.NodeType, node.CodeLocation)
+ }
+
+ switch node.NodeType {
+ case types.NodeTypeBeforeSuite, types.NodeTypeSynchronizedBeforeSuite:
+ existingBefores := suite.suiteNodes.WithType(types.NodeTypeBeforeSuite | types.NodeTypeSynchronizedBeforeSuite)
+ if len(existingBefores) > 0 {
+ return types.GinkgoErrors.MultipleBeforeSuiteNodes(node.NodeType, node.CodeLocation, existingBefores[0].NodeType, existingBefores[0].CodeLocation)
+ }
+ case types.NodeTypeAfterSuite, types.NodeTypeSynchronizedAfterSuite:
+ existingAfters := suite.suiteNodes.WithType(types.NodeTypeAfterSuite | types.NodeTypeSynchronizedAfterSuite)
+ if len(existingAfters) > 0 {
+ return types.GinkgoErrors.MultipleAfterSuiteNodes(node.NodeType, node.CodeLocation, existingAfters[0].NodeType, existingAfters[0].CodeLocation)
+ }
+ }
+
+ suite.suiteNodes = append(suite.suiteNodes, node)
+ return nil
+}
+
+func (suite *Suite) pushCleanupNode(node Node) error {
+ if suite.phase != PhaseRun || suite.currentNode.IsZero() {
+ return types.GinkgoErrors.PushingCleanupNodeDuringTreeConstruction(node.CodeLocation)
+ }
+
+ switch suite.currentNode.NodeType {
+ case types.NodeTypeBeforeSuite, types.NodeTypeSynchronizedBeforeSuite, types.NodeTypeAfterSuite, types.NodeTypeSynchronizedAfterSuite:
+ node.NodeType = types.NodeTypeCleanupAfterSuite
+ case types.NodeTypeBeforeAll, types.NodeTypeAfterAll:
+ node.NodeType = types.NodeTypeCleanupAfterAll
+ case types.NodeTypeReportBeforeEach, types.NodeTypeReportAfterEach, types.NodeTypeReportBeforeSuite, types.NodeTypeReportAfterSuite:
+ return types.GinkgoErrors.PushingCleanupInReportingNode(node.CodeLocation, suite.currentNode.NodeType)
+ case types.NodeTypeCleanupInvalid, types.NodeTypeCleanupAfterEach, types.NodeTypeCleanupAfterAll, types.NodeTypeCleanupAfterSuite:
+ return types.GinkgoErrors.PushingCleanupInCleanupNode(node.CodeLocation)
+ default:
+ node.NodeType = types.NodeTypeCleanupAfterEach
+ }
+
+ node.NodeIDWhereCleanupWasGenerated = suite.currentNode.ID
+ node.NestingLevel = suite.currentNode.NestingLevel
+ suite.selectiveLock.Lock()
+ suite.cleanupNodes = append(suite.cleanupNodes, node)
+ suite.selectiveLock.Unlock()
+
+ return nil
+}
+
+func (suite *Suite) generateTimelineLocation() types.TimelineLocation {
+ suite.selectiveLock.Lock()
+ defer suite.selectiveLock.Unlock()
+
+ suite.timelineOrder += 1
+ return types.TimelineLocation{
+ Offset: len(suite.currentSpecReport.CapturedGinkgoWriterOutput) + suite.writer.Len(),
+ Order: suite.timelineOrder,
+ Time: time.Now(),
+ }
+}
+
+func (suite *Suite) handleSpecEvent(event types.SpecEvent) types.SpecEvent {
+ event.TimelineLocation = suite.generateTimelineLocation()
+ suite.selectiveLock.Lock()
+ suite.currentSpecReport.SpecEvents = append(suite.currentSpecReport.SpecEvents, event)
+ suite.selectiveLock.Unlock()
+ suite.reporter.EmitSpecEvent(event)
+ return event
+}
+
+func (suite *Suite) handleSpecEventEnd(eventType types.SpecEventType, startEvent types.SpecEvent) {
+ event := startEvent
+ event.SpecEventType = eventType
+ event.TimelineLocation = suite.generateTimelineLocation()
+ event.Duration = event.TimelineLocation.Time.Sub(startEvent.TimelineLocation.Time)
+ suite.selectiveLock.Lock()
+ suite.currentSpecReport.SpecEvents = append(suite.currentSpecReport.SpecEvents, event)
+ suite.selectiveLock.Unlock()
+ suite.reporter.EmitSpecEvent(event)
+}
+
+func (suite *Suite) By(text string, callback ...func()) error {
+ cl := types.NewCodeLocation(2)
+ if suite.phase != PhaseRun {
+ return types.GinkgoErrors.ByNotDuringRunPhase(cl)
+ }
+
+ event := suite.handleSpecEvent(types.SpecEvent{
+ SpecEventType: types.SpecEventByStart,
+ CodeLocation: cl,
+ Message: text,
+ })
+ suite.selectiveLock.Lock()
+ suite.currentByStep = event
+ suite.selectiveLock.Unlock()
+
+ if len(callback) == 1 {
+ defer func() {
+ suite.selectiveLock.Lock()
+ suite.currentByStep = types.SpecEvent{}
+ suite.selectiveLock.Unlock()
+ suite.handleSpecEventEnd(types.SpecEventByEnd, event)
+ }()
+ callback[0]()
+ } else if len(callback) > 1 {
+ panic("just one callback per By, please")
+ }
+ return nil
+}
+
+/*
+Spec Running methods - used during PhaseRun
+*/
+func (suite *Suite) CurrentSpecReport() types.SpecReport {
+ suite.selectiveLock.Lock()
+ defer suite.selectiveLock.Unlock()
+ report := suite.currentSpecReport
+ if suite.writer != nil {
+ report.CapturedGinkgoWriterOutput = string(suite.writer.Bytes())
+ }
+ report.ReportEntries = make([]ReportEntry, len(report.ReportEntries))
+ copy(report.ReportEntries, suite.currentSpecReport.ReportEntries)
+ return report
+}
+
+// Only valid in the preview context. In general suite.report only includes
+// the specs run by _this_ node - it is only at the end of the suite that
+// the parallel reports are aggregated. However, in the preview context we run
+// in series, so suite.report already contains every spec.
+func (suite *Suite) GetPreviewReport() types.Report {
+ suite.selectiveLock.Lock()
+ defer suite.selectiveLock.Unlock()
+ return suite.report
+}
+
+func (suite *Suite) AddReportEntry(entry ReportEntry) error {
+ if suite.phase != PhaseRun {
+ return types.GinkgoErrors.AddReportEntryNotDuringRunPhase(entry.Location)
+ }
+ entry.TimelineLocation = suite.generateTimelineLocation()
+ entry.Time = entry.TimelineLocation.Time
+ suite.selectiveLock.Lock()
+ suite.currentSpecReport.ReportEntries = append(suite.currentSpecReport.ReportEntries, entry)
+ suite.selectiveLock.Unlock()
+ suite.reporter.EmitReportEntry(entry)
+ return nil
+}
+
+func (suite *Suite) generateProgressReport(fullReport bool) types.ProgressReport {
+ timelineLocation := suite.generateTimelineLocation()
+ suite.selectiveLock.Lock()
+ defer suite.selectiveLock.Unlock()
+
+ ctx, cancel := context.WithTimeout(context.Background(), PROGRESS_REPORTER_DEADLINE)
+ defer cancel()
+ var additionalReports []string
+ if suite.currentSpecContext != nil {
+ additionalReports = append(additionalReports, suite.currentSpecContext.QueryProgressReporters(ctx, suite.failer)...)
+ }
+ additionalReports = append(additionalReports, suite.QueryProgressReporters(ctx, suite.failer)...)
+ gwOutput := suite.currentSpecReport.CapturedGinkgoWriterOutput + string(suite.writer.Bytes())
+ pr, err := NewProgressReport(suite.isRunningInParallel(), suite.currentSpecReport, suite.currentNode, suite.currentNodeStartTime, suite.currentByStep, gwOutput, timelineLocation, additionalReports, suite.config.SourceRoots, fullReport)
+
+ if err != nil {
+ fmt.Printf("{{red}}Failed to generate progress report:{{/}}\n%s\n", err.Error())
+ }
+ return pr
+}
+
+func (suite *Suite) handleProgressSignal() {
+ report := suite.generateProgressReport(false)
+ report.Message = "{{bold}}You've requested a progress report:{{/}}"
+ suite.emitProgressReport(report)
+}
+
+func (suite *Suite) emitProgressReport(report types.ProgressReport) {
+ suite.selectiveLock.Lock()
+ suite.currentSpecReport.ProgressReports = append(suite.currentSpecReport.ProgressReports, report.WithoutCapturedGinkgoWriterOutput())
+ suite.selectiveLock.Unlock()
+
+ suite.reporter.EmitProgressReport(report)
+ if suite.isRunningInParallel() {
+ err := suite.client.PostEmitProgressReport(report)
+ if err != nil {
+ fmt.Println(err.Error())
+ }
+ }
+}
+
+func (suite *Suite) isRunningInParallel() bool {
+ return suite.config.ParallelTotal > 1
+}
+
+func (suite *Suite) processCurrentSpecReport() {
+ suite.reporter.DidRun(suite.currentSpecReport)
+ if suite.isRunningInParallel() {
+ suite.client.PostDidRun(suite.currentSpecReport)
+ }
+ suite.report.SpecReports = append(suite.report.SpecReports, suite.currentSpecReport)
+
+ if suite.currentSpecReport.State.Is(types.SpecStateFailureStates) {
+ suite.report.SuiteSucceeded = false
+ if suite.config.FailFast || suite.currentSpecReport.State.Is(types.SpecStateAborted) {
+ suite.skipAll = true
+ if suite.isRunningInParallel() {
+ suite.client.PostAbort()
+ }
+ }
+ }
+}
+
+func (suite *Suite) runSpecs(description string, suiteLabels Labels, suitePath string, hasProgrammaticFocus bool, specs Specs) bool {
+ numSpecsThatWillBeRun := specs.CountWithoutSkip()
+
+ suite.report = types.Report{
+ SuitePath: suitePath,
+ SuiteDescription: description,
+ SuiteLabels: suiteLabels,
+ SuiteConfig: suite.config,
+ SuiteHasProgrammaticFocus: hasProgrammaticFocus,
+ PreRunStats: types.PreRunStats{
+ TotalSpecs: len(specs),
+ SpecsThatWillRun: numSpecsThatWillBeRun,
+ },
+ StartTime: time.Now(),
+ }
+
+ suite.reporter.SuiteWillBegin(suite.report)
+ if suite.isRunningInParallel() {
+ suite.client.PostSuiteWillBegin(suite.report)
+ }
+
+ suite.report.SuiteSucceeded = true
+
+ suite.runReportSuiteNodesIfNeedBe(types.NodeTypeReportBeforeSuite)
+
+ ranBeforeSuite := suite.report.SuiteSucceeded
+ if suite.report.SuiteSucceeded {
+ suite.runBeforeSuite(numSpecsThatWillBeRun)
+ }
+
+ if suite.report.SuiteSucceeded {
+ groupedSpecIndices, serialGroupedSpecIndices := OrderSpecs(specs, suite.config)
+ nextIndex := MakeIncrementingIndexCounter()
+ if suite.isRunningInParallel() {
+ nextIndex = suite.client.FetchNextCounter
+ }
+
+ for {
+ groupedSpecIdx, err := nextIndex()
+ if err != nil {
+ suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, fmt.Sprintf("Failed to iterate over specs:\n%s", err.Error()))
+ suite.report.SuiteSucceeded = false
+ break
+ }
+
+ if groupedSpecIdx >= len(groupedSpecIndices) {
+ if suite.config.ParallelProcess == 1 && len(serialGroupedSpecIndices) > 0 {
+ groupedSpecIndices, serialGroupedSpecIndices, nextIndex = serialGroupedSpecIndices, GroupedSpecIndices{}, MakeIncrementingIndexCounter()
+ suite.client.BlockUntilNonprimaryProcsHaveFinished()
+ continue
+ }
+ break
+ }
+
+ // the complexity for running groups of specs is very high because of Ordered containers and FlakeAttempts
+ // we encapsulate that complexity in the notion of a Group that can run
+ // Group is really just an extension of suite so it gets passed a suite and has access to all its internals
+ // Note that group is stateful and intended for single use!
+ newGroup(suite).run(specs.AtIndices(groupedSpecIndices[groupedSpecIdx]))
+ }
+
+ if suite.config.FailOnPending && specs.HasAnySpecsMarkedPending() {
+ suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Detected pending specs and --fail-on-pending is set")
+ suite.report.SuiteSucceeded = false
+ }
+
+ if suite.config.FailOnEmpty && specs.CountWithoutSkip() == 0 {
+ suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Detected no specs ran and --fail-on-empty is set")
+ suite.report.SuiteSucceeded = false
+ }
+ }
+
+ if ranBeforeSuite {
+ suite.runAfterSuiteCleanup(numSpecsThatWillBeRun)
+ }
+
+ interruptStatus := suite.interruptHandler.Status()
+ if interruptStatus.Interrupted() {
+ suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, interruptStatus.Cause.String())
+ suite.report.SuiteSucceeded = false
+ }
+ suite.report.EndTime = time.Now()
+ suite.report.RunTime = suite.report.EndTime.Sub(suite.report.StartTime)
+ if !suite.deadline.IsZero() && suite.report.EndTime.After(suite.deadline) {
+ suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Suite Timeout Elapsed")
+ suite.report.SuiteSucceeded = false
+ }
+
+ suite.runReportSuiteNodesIfNeedBe(types.NodeTypeReportAfterSuite)
+ suite.reporter.SuiteDidEnd(suite.report)
+ if suite.isRunningInParallel() {
+ suite.client.PostSuiteDidEnd(suite.report)
+ }
+
+ return suite.report.SuiteSucceeded
+}
+
+func (suite *Suite) runBeforeSuite(numSpecsThatWillBeRun int) {
+ beforeSuiteNode := suite.suiteNodes.FirstNodeWithType(types.NodeTypeBeforeSuite | types.NodeTypeSynchronizedBeforeSuite)
+ if !beforeSuiteNode.IsZero() && numSpecsThatWillBeRun > 0 {
+ suite.selectiveLock.Lock()
+ suite.currentSpecReport = types.SpecReport{
+ LeafNodeType: beforeSuiteNode.NodeType,
+ LeafNodeLocation: beforeSuiteNode.CodeLocation,
+ ParallelProcess: suite.config.ParallelProcess,
+ RunningInParallel: suite.isRunningInParallel(),
+ }
+ suite.selectiveLock.Unlock()
+
+ suite.reporter.WillRun(suite.currentSpecReport)
+ suite.runSuiteNode(beforeSuiteNode)
+ if suite.currentSpecReport.State.Is(types.SpecStateSkipped) {
+ suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Suite skipped in BeforeSuite")
+ suite.skipAll = true
+ }
+ suite.processCurrentSpecReport()
+ }
+}
+
+func (suite *Suite) runAfterSuiteCleanup(numSpecsThatWillBeRun int) {
+ afterSuiteNode := suite.suiteNodes.FirstNodeWithType(types.NodeTypeAfterSuite | types.NodeTypeSynchronizedAfterSuite)
+ if !afterSuiteNode.IsZero() && numSpecsThatWillBeRun > 0 {
+ suite.selectiveLock.Lock()
+ suite.currentSpecReport = types.SpecReport{
+ LeafNodeType: afterSuiteNode.NodeType,
+ LeafNodeLocation: afterSuiteNode.CodeLocation,
+ ParallelProcess: suite.config.ParallelProcess,
+ RunningInParallel: suite.isRunningInParallel(),
+ }
+ suite.selectiveLock.Unlock()
+
+ suite.reporter.WillRun(suite.currentSpecReport)
+ suite.runSuiteNode(afterSuiteNode)
+ suite.processCurrentSpecReport()
+ }
+
+ afterSuiteCleanup := suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterSuite).Reverse()
+ if len(afterSuiteCleanup) > 0 {
+ for _, cleanupNode := range afterSuiteCleanup {
+ suite.selectiveLock.Lock()
+ suite.currentSpecReport = types.SpecReport{
+ LeafNodeType: cleanupNode.NodeType,
+ LeafNodeLocation: cleanupNode.CodeLocation,
+ ParallelProcess: suite.config.ParallelProcess,
+ RunningInParallel: suite.isRunningInParallel(),
+ }
+ suite.selectiveLock.Unlock()
+
+ suite.reporter.WillRun(suite.currentSpecReport)
+ suite.runSuiteNode(cleanupNode)
+ suite.processCurrentSpecReport()
+ }
+ }
+}
+
+func (suite *Suite) reportEach(spec Spec, nodeType types.NodeType) {
+ nodes := spec.Nodes.WithType(nodeType)
+ if nodeType == types.NodeTypeReportAfterEach {
+ nodes = nodes.SortedByDescendingNestingLevel()
+ }
+ if nodeType == types.NodeTypeReportBeforeEach {
+ nodes = nodes.SortedByAscendingNestingLevel()
+ }
+ if len(nodes) == 0 {
+ return
+ }
+
+ for i := range nodes {
+ suite.writer.Truncate()
+ suite.outputInterceptor.StartInterceptingOutput()
+ report := suite.currentSpecReport
+ nodes[i].Body = func(ctx SpecContext) {
+ nodes[i].ReportEachBody(ctx, report)
+ }
+ state, failure := suite.runNode(nodes[i], time.Time{}, spec.Nodes.BestTextFor(nodes[i]))
+
+ // If the spec is not in a failure state (i.e. it's Passed/Skipped/Pending) and the reporter has failed, override the state.
+ // Also, if the reporter is ever aborted - always override the state to propagate the abort.
+ if (!suite.currentSpecReport.State.Is(types.SpecStateFailureStates) && state.Is(types.SpecStateFailureStates)) || state.Is(types.SpecStateAborted) {
+ suite.currentSpecReport.State = state
+ suite.currentSpecReport.Failure = failure
+ }
+ suite.currentSpecReport.CapturedGinkgoWriterOutput += string(suite.writer.Bytes())
+ suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput()
+ }
+}
+
+func (suite *Suite) runSuiteNode(node Node) {
+ if suite.config.DryRun {
+ suite.currentSpecReport.State = types.SpecStatePassed
+ return
+ }
+
+ suite.writer.Truncate()
+ suite.outputInterceptor.StartInterceptingOutput()
+ suite.currentSpecReport.StartTime = time.Now()
+
+ var err error
+ switch node.NodeType {
+ case types.NodeTypeBeforeSuite, types.NodeTypeAfterSuite:
+ suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "")
+ case types.NodeTypeCleanupAfterSuite:
+ if suite.config.ParallelTotal > 1 && suite.config.ParallelProcess == 1 {
+ err = suite.client.BlockUntilNonprimaryProcsHaveFinished()
+ }
+ if err == nil {
+ suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "")
+ }
+ case types.NodeTypeSynchronizedBeforeSuite:
+ var data []byte
+ var runAllProcs bool
+ if suite.config.ParallelProcess == 1 {
+ if suite.config.ParallelTotal > 1 {
+ suite.outputInterceptor.StopInterceptingAndReturnOutput()
+ suite.outputInterceptor.StartInterceptingOutputAndForwardTo(suite.client)
+ }
+ node.Body = func(c SpecContext) { data = node.SynchronizedBeforeSuiteProc1Body(c) }
+ node.HasContext = node.SynchronizedBeforeSuiteProc1BodyHasContext
+ suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "")
+ if suite.config.ParallelTotal > 1 {
+ suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput()
+ suite.outputInterceptor.StartInterceptingOutput()
+ if suite.currentSpecReport.State.Is(types.SpecStatePassed) {
+ err = suite.client.PostSynchronizedBeforeSuiteCompleted(types.SpecStatePassed, data)
+ } else {
+ err = suite.client.PostSynchronizedBeforeSuiteCompleted(suite.currentSpecReport.State, nil)
+ }
+ }
+ runAllProcs = suite.currentSpecReport.State.Is(types.SpecStatePassed) && err == nil
+ } else {
+ var proc1State types.SpecState
+ proc1State, data, err = suite.client.BlockUntilSynchronizedBeforeSuiteData()
+ switch proc1State {
+ case types.SpecStatePassed:
+ runAllProcs = true
+ case types.SpecStateFailed, types.SpecStatePanicked, types.SpecStateTimedout:
+ err = types.GinkgoErrors.SynchronizedBeforeSuiteFailedOnProc1()
+ case types.SpecStateInterrupted, types.SpecStateAborted, types.SpecStateSkipped:
+ suite.currentSpecReport.State = proc1State
+ }
+ }
+ if runAllProcs {
+ node.Body = func(c SpecContext) { node.SynchronizedBeforeSuiteAllProcsBody(c, data) }
+ node.HasContext = node.SynchronizedBeforeSuiteAllProcsBodyHasContext
+ suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "")
+ }
+ case types.NodeTypeSynchronizedAfterSuite:
+ node.Body = node.SynchronizedAfterSuiteAllProcsBody
+ node.HasContext = node.SynchronizedAfterSuiteAllProcsBodyHasContext
+ suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "")
+ if suite.config.ParallelProcess == 1 {
+ if suite.config.ParallelTotal > 1 {
+ err = suite.client.BlockUntilNonprimaryProcsHaveFinished()
+ }
+ if err == nil {
+ if suite.config.ParallelTotal > 1 {
+ suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput()
+ suite.outputInterceptor.StartInterceptingOutputAndForwardTo(suite.client)
+ }
+
+ node.Body = node.SynchronizedAfterSuiteProc1Body
+ node.HasContext = node.SynchronizedAfterSuiteProc1BodyHasContext
+ state, failure := suite.runNode(node, time.Time{}, "")
+ if suite.currentSpecReport.State.Is(types.SpecStatePassed) {
+ suite.currentSpecReport.State, suite.currentSpecReport.Failure = state, failure
+ }
+ }
+ }
+ }
+
+ if err != nil && !suite.currentSpecReport.State.Is(types.SpecStateFailureStates) {
+ suite.currentSpecReport.State, suite.currentSpecReport.Failure = types.SpecStateFailed, suite.failureForLeafNodeWithMessage(node, err.Error())
+ suite.reporter.EmitFailure(suite.currentSpecReport.State, suite.currentSpecReport.Failure)
+ }
+
+ suite.currentSpecReport.EndTime = time.Now()
+ suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime)
+ suite.currentSpecReport.CapturedGinkgoWriterOutput = string(suite.writer.Bytes())
+ suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput()
+}
+
+func (suite *Suite) runReportSuiteNodesIfNeedBe(nodeType types.NodeType) {
+ nodes := suite.suiteNodes.WithType(nodeType)
+ // only run ReportAfterSuite on proc 1
+ if nodeType.Is(types.NodeTypeReportAfterSuite) && suite.config.ParallelProcess != 1 {
+ return
+ }
+ // if we're running ReportBeforeSuite on proc > 1 - we should wait until proc 1 has completed
+ if nodeType.Is(types.NodeTypeReportBeforeSuite) && suite.config.ParallelProcess != 1 && len(nodes) > 0 {
+ state, err := suite.client.BlockUntilReportBeforeSuiteCompleted()
+ if err != nil || state.Is(types.SpecStateFailed) {
+ suite.report.SuiteSucceeded = false
+ }
+ return
+ }
+
+ for _, node := range nodes {
+ suite.selectiveLock.Lock()
+ suite.currentSpecReport = types.SpecReport{
+ LeafNodeType: node.NodeType,
+ LeafNodeLocation: node.CodeLocation,
+ LeafNodeText: node.Text,
+ ParallelProcess: suite.config.ParallelProcess,
+ RunningInParallel: suite.isRunningInParallel(),
+ }
+ suite.selectiveLock.Unlock()
+
+ suite.reporter.WillRun(suite.currentSpecReport)
+ suite.runReportSuiteNode(node, suite.report)
+ suite.processCurrentSpecReport()
+ }
+
+ // if we're running ReportBeforeSuite and we're running in parallel - we should tell the other procs that we're done
+ if nodeType.Is(types.NodeTypeReportBeforeSuite) && suite.isRunningInParallel() && len(nodes) > 0 {
+ if suite.report.SuiteSucceeded {
+ suite.client.PostReportBeforeSuiteCompleted(types.SpecStatePassed)
+ } else {
+ suite.client.PostReportBeforeSuiteCompleted(types.SpecStateFailed)
+ }
+ }
+}
+
+func (suite *Suite) runReportSuiteNode(node Node, report types.Report) {
+ suite.writer.Truncate()
+ suite.outputInterceptor.StartInterceptingOutput()
+ suite.currentSpecReport.StartTime = time.Now()
+
+ // if we're running a ReportAfterSuite in parallel (on proc 1) we (a) wait until other procs have exited and
+ // (b) always fetch the latest report as prior ReportAfterSuites will contribute to it
+ if node.NodeType.Is(types.NodeTypeReportAfterSuite) && suite.isRunningInParallel() {
+ aggregatedReport, err := suite.client.BlockUntilAggregatedNonprimaryProcsReport()
+ if err != nil {
+ suite.currentSpecReport.State, suite.currentSpecReport.Failure = types.SpecStateFailed, suite.failureForLeafNodeWithMessage(node, err.Error())
+ suite.reporter.EmitFailure(suite.currentSpecReport.State, suite.currentSpecReport.Failure)
+ return
+ }
+ report = report.Add(aggregatedReport)
+ }
+
+ node.Body = func(ctx SpecContext) { node.ReportSuiteBody(ctx, report) }
+ suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "")
+
+ suite.currentSpecReport.EndTime = time.Now()
+ suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime)
+ suite.currentSpecReport.CapturedGinkgoWriterOutput = string(suite.writer.Bytes())
+ suite.currentSpecReport.CapturedStdOutErr = suite.outputInterceptor.StopInterceptingAndReturnOutput()
+}
+
+func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (types.SpecState, types.Failure) {
+ if node.NodeType.Is(types.NodeTypeCleanupAfterEach | types.NodeTypeCleanupAfterAll | types.NodeTypeCleanupAfterSuite) {
+ suite.cleanupNodes = suite.cleanupNodes.WithoutNode(node)
+ }
+
+ interruptStatus := suite.interruptHandler.Status()
+ if interruptStatus.Level == interrupt_handler.InterruptLevelBailOut {
+ return types.SpecStateSkipped, types.Failure{}
+ }
+ if interruptStatus.Level == interrupt_handler.InterruptLevelReportOnly && !node.NodeType.Is(types.NodeTypesAllowedDuringReportInterrupt) {
+ return types.SpecStateSkipped, types.Failure{}
+ }
+ if interruptStatus.Level == interrupt_handler.InterruptLevelCleanupAndReport && !node.NodeType.Is(types.NodeTypesAllowedDuringReportInterrupt|types.NodeTypesAllowedDuringCleanupInterrupt) {
+ return types.SpecStateSkipped, types.Failure{}
+ }
+
+ suite.selectiveLock.Lock()
+ suite.currentNode = node
+ suite.currentNodeStartTime = time.Now()
+ suite.currentByStep = types.SpecEvent{}
+ suite.selectiveLock.Unlock()
+ defer func() {
+ suite.selectiveLock.Lock()
+ suite.currentNode = Node{}
+ suite.currentNodeStartTime = time.Time{}
+ suite.selectiveLock.Unlock()
+ }()
+
+ if text == "" {
+ text = "TOP-LEVEL"
+ }
+ event := suite.handleSpecEvent(types.SpecEvent{
+ SpecEventType: types.SpecEventNodeStart,
+ NodeType: node.NodeType,
+ Message: text,
+ CodeLocation: node.CodeLocation,
+ })
+ defer func() {
+ suite.handleSpecEventEnd(types.SpecEventNodeEnd, event)
+ }()
+
+ var failure types.Failure
+ failure.FailureNodeType, failure.FailureNodeLocation = node.NodeType, node.CodeLocation
+ if node.NodeType.Is(types.NodeTypeIt) || node.NodeType.Is(types.NodeTypesForSuiteLevelNodes) {
+ failure.FailureNodeContext = types.FailureNodeIsLeafNode
+ } else if node.NestingLevel <= 0 {
+ failure.FailureNodeContext = types.FailureNodeAtTopLevel
+ } else {
+ failure.FailureNodeContext, failure.FailureNodeContainerIndex = types.FailureNodeInContainer, node.NestingLevel-1
+ }
+ var outcome types.SpecState
+
+ gracePeriod := suite.config.GracePeriod
+ if node.GracePeriod >= 0 {
+ gracePeriod = node.GracePeriod
+ }
+
+ now := time.Now()
+ deadline := suite.deadline
+ timeoutInPlay := "suite"
+ if deadline.IsZero() || (!specDeadline.IsZero() && specDeadline.Before(deadline)) {
+ deadline = specDeadline
+ timeoutInPlay = "spec"
+ }
+ if node.NodeTimeout > 0 && (deadline.IsZero() || deadline.Sub(now) > node.NodeTimeout) {
+ deadline = now.Add(node.NodeTimeout)
+ timeoutInPlay = "node"
+ }
+ if (!deadline.IsZero() && deadline.Before(now)) || interruptStatus.Interrupted() {
+ // we're out of time already. let's wait for a NodeTimeout if we have it, or GracePeriod if we don't
+ if node.NodeTimeout > 0 {
+ deadline = now.Add(node.NodeTimeout)
+ timeoutInPlay = "node"
+ } else {
+ deadline = now.Add(gracePeriod)
+ timeoutInPlay = "grace period"
+ }
+ }
+
+ if !node.HasContext {
+ // this maps onto the pre-context behavior:
+ // - an interrupted node exits immediately. with this, context-less nodes that are in a spec with a SpecTimeout and/or are interrupted by other means will simply exit immediately after the timeout/interrupt
+ // - clean up nodes have up to GracePeriod (formerly hard-coded at 30s) to complete before they are interrupted
+ gracePeriod = 0
+ }
+
+ sc := NewSpecContext(suite)
+ defer sc.cancel(fmt.Errorf("spec has finished"))
+
+ suite.selectiveLock.Lock()
+ suite.currentSpecContext = sc
+ suite.selectiveLock.Unlock()
+
+ var deadlineChannel <-chan time.Time
+ if !deadline.IsZero() {
+ deadlineChannel = time.After(deadline.Sub(now))
+ }
+ var gracePeriodChannel <-chan time.Time
+
+ outcomeC := make(chan types.SpecState)
+ failureC := make(chan types.Failure)
+
+ go func() {
+ finished := false
+ defer func() {
+ if e := recover(); e != nil || !finished {
+ suite.failer.Panic(types.NewCodeLocationWithStackTrace(2), e)
+ }
+
+ outcomeFromRun, failureFromRun := suite.failer.Drain()
+ failureFromRun.TimelineLocation = suite.generateTimelineLocation()
+ outcomeC <- outcomeFromRun
+ failureC <- failureFromRun
+ }()
+
+ node.Body(sc)
+ finished = true
+ }()
+
+ // progress polling timer and channel
+ var emitProgressNow <-chan time.Time
+ var progressPoller *time.Timer
+ var pollProgressAfter, pollProgressInterval = suite.config.PollProgressAfter, suite.config.PollProgressInterval
+ if node.PollProgressAfter >= 0 {
+ pollProgressAfter = node.PollProgressAfter
+ }
+ if node.PollProgressInterval >= 0 {
+ pollProgressInterval = node.PollProgressInterval
+ }
+ if pollProgressAfter > 0 {
+ progressPoller = time.NewTimer(pollProgressAfter)
+ emitProgressNow = progressPoller.C
+ defer progressPoller.Stop()
+ }
+
+ // now we wait for an outcome, an interrupt, a timeout, or a progress poll
+ for {
+ select {
+ case outcomeFromRun := <-outcomeC:
+ failureFromRun := <-failureC
+ if outcome.Is(types.SpecStateInterrupted | types.SpecStateTimedout) {
+ // we've already been interrupted/timed out. we just managed to actually exit
+ // before the grace period elapsed
+ // if we have a failure message we attach it as an additional failure
+ if outcomeFromRun != types.SpecStatePassed {
+ additionalFailure := types.AdditionalFailure{
+ State: outcomeFromRun,
+ Failure: failure, // we make a copy - this will include all the configuration set up above...
+ }
+ // ...and then we update the failure with the details from failureFromRun
+ additionalFailure.Failure.Location, additionalFailure.Failure.ForwardedPanic, additionalFailure.Failure.TimelineLocation = failureFromRun.Location, failureFromRun.ForwardedPanic, failureFromRun.TimelineLocation
+ additionalFailure.Failure.ProgressReport = types.ProgressReport{}
+ if outcome == types.SpecStateTimedout {
+ additionalFailure.Failure.Message = fmt.Sprintf("A %s timeout occurred and then the following failure was recorded in the timedout node before it exited:\n%s", timeoutInPlay, failureFromRun.Message)
+ } else {
+ additionalFailure.Failure.Message = fmt.Sprintf("An interrupt occurred and then the following failure was recorded in the interrupted node before it exited:\n%s", failureFromRun.Message)
+ }
+ suite.reporter.EmitFailure(additionalFailure.State, additionalFailure.Failure)
+ failure.AdditionalFailure = &additionalFailure
+ }
+ return outcome, failure
+ }
+ if outcomeFromRun.Is(types.SpecStatePassed) {
+ return outcomeFromRun, types.Failure{}
+ } else {
+ failure.Message, failure.Location, failure.ForwardedPanic, failure.TimelineLocation = failureFromRun.Message, failureFromRun.Location, failureFromRun.ForwardedPanic, failureFromRun.TimelineLocation
+ suite.reporter.EmitFailure(outcomeFromRun, failure)
+ return outcomeFromRun, failure
+ }
+ case <-gracePeriodChannel:
+ if node.HasContext && outcome.Is(types.SpecStateTimedout) {
+ report := suite.generateProgressReport(false)
+ report.Message = "{{bold}}{{orange}}A running node failed to exit in time{{/}}\nGinkgo is moving on but a node has timed out and failed to exit before its grace period elapsed. The node has now leaked and is running in the background.\nHere's a current progress report:"
+ suite.emitProgressReport(report)
+ }
+ return outcome, failure
+ case <-deadlineChannel:
+ // we're out of time - the outcome is a timeout and we capture the failure and progress report
+ outcome = types.SpecStateTimedout
+ failure.Message, failure.Location, failure.TimelineLocation = fmt.Sprintf("A %s timeout occurred", timeoutInPlay), node.CodeLocation, suite.generateTimelineLocation()
+ failure.ProgressReport = suite.generateProgressReport(false).WithoutCapturedGinkgoWriterOutput()
+ failure.ProgressReport.Message = fmt.Sprintf("{{bold}}This is the Progress Report generated when the %s timeout occurred:{{/}}", timeoutInPlay)
+ deadlineChannel = nil
+ suite.reporter.EmitFailure(outcome, failure)
+
+ // tell the spec to stop. it's important we generate the progress report first to make sure we capture where
+ // the spec is actually stuck
+ sc.cancel(fmt.Errorf("%s timeout occurred", timeoutInPlay))
+ // and now we wait for the grace period
+ gracePeriodChannel = time.After(gracePeriod)
+ case <-interruptStatus.Channel:
+ interruptStatus = suite.interruptHandler.Status()
+ // ignore interruption from other process if we are cleaning up or reporting
+ if interruptStatus.Cause == interrupt_handler.InterruptCauseAbortByOtherProcess &&
+ node.NodeType.Is(types.NodeTypesAllowedDuringReportInterrupt|types.NodeTypesAllowedDuringCleanupInterrupt) {
+ continue
+ }
+
+ deadlineChannel = nil // don't worry about deadlines, time's up now
+
+ failureTimelineLocation := suite.generateTimelineLocation()
+ progressReport := suite.generateProgressReport(true)
+
+ if outcome == types.SpecStateInvalid {
+ outcome = types.SpecStateInterrupted
+ failure.Message, failure.Location, failure.TimelineLocation = interruptStatus.Message(), node.CodeLocation, failureTimelineLocation
+ if interruptStatus.ShouldIncludeProgressReport() {
+ failure.ProgressReport = progressReport.WithoutCapturedGinkgoWriterOutput()
+ failure.ProgressReport.Message = "{{bold}}This is the Progress Report generated when the interrupt was received:{{/}}"
+ }
+ suite.reporter.EmitFailure(outcome, failure)
+ }
+
+ progressReport = progressReport.WithoutOtherGoroutines()
+			sc.cancel(fmt.Errorf("%s", interruptStatus.Message()))
+
+ if interruptStatus.Level == interrupt_handler.InterruptLevelBailOut {
+ if interruptStatus.ShouldIncludeProgressReport() {
+ progressReport.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\n{{bold}}{{red}}Final interrupt received{{/}}; Ginkgo will not run any cleanup or reporting nodes and will terminate as soon as possible.\nHere's a current progress report:", interruptStatus.Message())
+ suite.emitProgressReport(progressReport)
+ }
+ return outcome, failure
+ }
+ if interruptStatus.ShouldIncludeProgressReport() {
+ if interruptStatus.Level == interrupt_handler.InterruptLevelCleanupAndReport {
+ progressReport.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\nFirst interrupt received; Ginkgo will run any cleanup and reporting nodes but will skip all remaining specs. {{bold}}Interrupt again to skip cleanup{{/}}.\nHere's a current progress report:", interruptStatus.Message())
+ } else if interruptStatus.Level == interrupt_handler.InterruptLevelReportOnly {
+ progressReport.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\nSecond interrupt received; Ginkgo will run any reporting nodes but will skip all remaining specs and cleanup nodes. {{bold}}Interrupt again to bail immediately{{/}}.\nHere's a current progress report:", interruptStatus.Message())
+ }
+ suite.emitProgressReport(progressReport)
+ }
+
+ if gracePeriodChannel == nil {
+ // we haven't given grace yet... so let's
+ gracePeriodChannel = time.After(gracePeriod)
+ } else {
+ // we've already given grace. time's up. now.
+ return outcome, failure
+ }
+ case <-emitProgressNow:
+ report := suite.generateProgressReport(false)
+ report.Message = "{{bold}}Automatically polling progress:{{/}}"
+ suite.emitProgressReport(report)
+ if pollProgressInterval > 0 {
+ progressPoller.Reset(pollProgressInterval)
+ }
+ }
+ }
+}
+
+// TODO: search for usages and consider whether reporter.EmitFailure() is necessary
+func (suite *Suite) failureForLeafNodeWithMessage(node Node, message string) types.Failure {
+ return types.Failure{
+ Message: message,
+ Location: node.CodeLocation,
+ TimelineLocation: suite.generateTimelineLocation(),
+ FailureNodeContext: types.FailureNodeIsLeafNode,
+ FailureNodeType: node.NodeType,
+ FailureNodeLocation: node.CodeLocation,
+ }
+}
+
+func max(a, b int) int {
+ if a > b {
+ return a
+ }
+ return b
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/suite_patch.go b/vendor/github.com/onsi/ginkgo/v2/internal/suite_patch.go
new file mode 100644
index 000000000..29eae0283
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/suite_patch.go
@@ -0,0 +1,71 @@
+package internal
+
+import (
+ "time"
+
+ "github.com/onsi/ginkgo/v2/internal/interrupt_handler"
+ "github.com/onsi/ginkgo/v2/reporters"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type AnnotateFunc func(testName string, test types.TestSpec)
+
+func (suite *Suite) SetAnnotateFn(fn AnnotateFunc) {
+ suite.annotateFn = fn
+}
+
+func (suite *Suite) GetReport() types.Report {
+ return suite.report
+}
+
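+// WalkTests visits every spec in the built tree without running it, handing
+// each spec's full text and TestSpec to fn. A minimal usage sketch (assuming
+// BuildTree has already been called):
+//
+//	suite.WalkTests(func(name string, spec types.TestSpec) {
+//		fmt.Println(name)
+//	})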
+func (suite *Suite) WalkTests(fn AnnotateFunc) {
+ if suite.phase != PhaseBuildTree {
+		panic("cannot run before building the tree - call suite.BuildTree() first")
+ }
+ ApplyNestedFocusPolicyToTree(suite.tree)
+ specs := GenerateSpecsFromTreeRoot(suite.tree)
+ for _, spec := range specs {
+ fn(spec.Text(), spec)
+ }
+}
+
+func (suite *Suite) InPhaseBuildTree() bool {
+ return suite.phase == PhaseBuildTree
+}
+
+func (suite *Suite) ClearBeforeAndAfterSuiteNodes() {
+	// Don't build the tree multiple times; rebuilding would initialize the tests more than once
+ if !suite.InPhaseBuildTree() {
+ suite.BuildTree()
+ }
+ newNodes := Nodes{}
+ for _, node := range suite.suiteNodes {
+ if node.NodeType == types.NodeTypeBeforeSuite || node.NodeType == types.NodeTypeAfterSuite || node.NodeType == types.NodeTypeSynchronizedBeforeSuite || node.NodeType == types.NodeTypeSynchronizedAfterSuite {
+ continue
+ }
+ newNodes = append(newNodes, node)
+ }
+ suite.suiteNodes = newNodes
+}
+
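+// RunSpec runs a single, already-built spec outside the normal Ginkgo CLI
+// flow: it installs a fresh failer, reporter, and writer, disables output
+// interception and the parallel client, and reports whether the spec
+// succeeded. The second return value is currently always false.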
+func (suite *Suite) RunSpec(spec types.TestSpec, suiteLabels Labels, suiteDescription, suitePath string, failer *Failer, writer WriterInterface, suiteConfig types.SuiteConfig, reporterConfig types.ReporterConfig) (bool, bool) {
+ if suite.phase != PhaseBuildTree {
+		panic("cannot run before building the tree - call suite.BuildTree() first")
+ }
+
+ suite.phase = PhaseRun
+ suite.client = nil
+ suite.failer = failer
+ suite.reporter = reporters.NewDefaultReporter(reporterConfig, writer)
+ suite.writer = writer
+ suite.outputInterceptor = NoopOutputInterceptor{}
+	if suiteConfig.Timeout > 0 {
+ suite.deadline = time.Now().Add(suiteConfig.Timeout)
+ }
+ suite.interruptHandler = interrupt_handler.NewInterruptHandler(nil)
+ suite.config = suiteConfig
+
+ success := suite.runSpecs(suiteDescription, suiteLabels, suitePath, false, []Spec{spec.(Spec)})
+
+ return success, false
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go b/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go
new file mode 100644
index 000000000..73e265565
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go
@@ -0,0 +1,210 @@
+package testingtproxy
+
+import (
+ "fmt"
+ "io"
+ "os"
+
+ "github.com/onsi/ginkgo/v2/formatter"
+ "github.com/onsi/ginkgo/v2/internal"
+ "github.com/onsi/ginkgo/v2/reporters"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type failFunc func(message string, callerSkip ...int)
+type skipFunc func(message string, callerSkip ...int)
+type cleanupFunc func(args ...any)
+type reportFunc func() types.SpecReport
+type addReportEntryFunc func(names string, args ...any)
+type ginkgoWriterInterface interface {
+ io.Writer
+
+ Print(a ...interface{})
+ Printf(format string, a ...interface{})
+ Println(a ...interface{})
+}
+type ginkgoRecoverFunc func()
+type attachProgressReporterFunc func(func() string) func()
+
+func New(writer ginkgoWriterInterface, fail failFunc, skip skipFunc, cleanup cleanupFunc, report reportFunc, addReportEntry addReportEntryFunc, ginkgoRecover ginkgoRecoverFunc, attachProgressReporter attachProgressReporterFunc, randomSeed int64, parallelProcess int, parallelTotal int, noColor bool, offset int) *ginkgoTestingTProxy {
+ return &ginkgoTestingTProxy{
+ fail: fail,
+ offset: offset,
+ writer: writer,
+ skip: skip,
+ cleanup: cleanup,
+ report: report,
+ addReportEntry: addReportEntry,
+ ginkgoRecover: ginkgoRecover,
+ attachProgressReporter: attachProgressReporter,
+ randomSeed: randomSeed,
+ parallelProcess: parallelProcess,
+ parallelTotal: parallelTotal,
+ f: formatter.NewWithNoColorBool(noColor),
+ }
+}
+
+type ginkgoTestingTProxy struct {
+ fail failFunc
+ skip skipFunc
+ cleanup cleanupFunc
+ report reportFunc
+ offset int
+ writer ginkgoWriterInterface
+ addReportEntry addReportEntryFunc
+ ginkgoRecover ginkgoRecoverFunc
+ attachProgressReporter attachProgressReporterFunc
+ randomSeed int64
+ parallelProcess int
+ parallelTotal int
+ f formatter.Formatter
+}
+
+// basic testing.T support
+
+func (t *ginkgoTestingTProxy) Cleanup(f func()) {
+ t.cleanup(f, internal.Offset(1))
+}
+
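+// Setenv sets an environment variable for the duration of the spec and
+// registers a cleanup that restores (or unsets) the original value.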
+func (t *ginkgoTestingTProxy) Setenv(key, value string) {
+ originalValue, exists := os.LookupEnv(key)
+ if exists {
+ t.cleanup(os.Setenv, key, originalValue, internal.Offset(1))
+ } else {
+ t.cleanup(os.Unsetenv, key, internal.Offset(1))
+ }
+
+ err := os.Setenv(key, value)
+ if err != nil {
+ t.fail(fmt.Sprintf("Failed to set environment variable: %v", err), 1)
+ }
+}
+
+func (t *ginkgoTestingTProxy) Error(args ...interface{}) {
+ t.fail(fmt.Sprintln(args...), t.offset)
+}
+
+func (t *ginkgoTestingTProxy) Errorf(format string, args ...interface{}) {
+ t.fail(fmt.Sprintf(format, args...), t.offset)
+}
+
+func (t *ginkgoTestingTProxy) Fail() {
+ t.fail("failed", t.offset)
+}
+
+func (t *ginkgoTestingTProxy) FailNow() {
+ t.fail("failed", t.offset)
+}
+
+func (t *ginkgoTestingTProxy) Failed() bool {
+ return t.report().Failed()
+}
+
+func (t *ginkgoTestingTProxy) Fatal(args ...interface{}) {
+ t.fail(fmt.Sprintln(args...), t.offset)
+}
+
+func (t *ginkgoTestingTProxy) Fatalf(format string, args ...interface{}) {
+ t.fail(fmt.Sprintf(format, args...), t.offset)
+}
+
+func (t *ginkgoTestingTProxy) Helper() {
+ types.MarkAsHelper(1)
+}
+
+func (t *ginkgoTestingTProxy) Log(args ...interface{}) {
+ fmt.Fprintln(t.writer, args...)
+}
+
+func (t *ginkgoTestingTProxy) Logf(format string, args ...interface{}) {
+ t.Log(fmt.Sprintf(format, args...))
+}
+
+func (t *ginkgoTestingTProxy) Name() string {
+ return t.report().FullText()
+}
+
+func (t *ginkgoTestingTProxy) Parallel() {
+ // No-op
+}
+
+func (t *ginkgoTestingTProxy) Skip(args ...interface{}) {
+ t.skip(fmt.Sprintln(args...), t.offset)
+}
+
+func (t *ginkgoTestingTProxy) SkipNow() {
+ t.skip("skip", t.offset)
+}
+
+func (t *ginkgoTestingTProxy) Skipf(format string, args ...interface{}) {
+ t.skip(fmt.Sprintf(format, args...), t.offset)
+}
+
+func (t *ginkgoTestingTProxy) Skipped() bool {
+ return t.report().State.Is(types.SpecStateSkipped)
+}
+
+func (t *ginkgoTestingTProxy) TempDir() string {
+ tmpDir, err := os.MkdirTemp("", "ginkgo")
+ if err != nil {
+ t.fail(fmt.Sprintf("Failed to create temporary directory: %v", err), 1)
+ return ""
+ }
+ t.cleanup(os.RemoveAll, tmpDir)
+
+ return tmpDir
+}
+
+// FullGinkgoTInterface
+func (t *ginkgoTestingTProxy) AddReportEntryVisibilityAlways(name string, args ...any) {
+ finalArgs := []any{internal.Offset(1), types.ReportEntryVisibilityAlways}
+ t.addReportEntry(name, append(finalArgs, args...)...)
+}
+func (t *ginkgoTestingTProxy) AddReportEntryVisibilityFailureOrVerbose(name string, args ...any) {
+ finalArgs := []any{internal.Offset(1), types.ReportEntryVisibilityFailureOrVerbose}
+ t.addReportEntry(name, append(finalArgs, args...)...)
+}
+func (t *ginkgoTestingTProxy) AddReportEntryVisibilityNever(name string, args ...any) {
+ finalArgs := []any{internal.Offset(1), types.ReportEntryVisibilityNever}
+ t.addReportEntry(name, append(finalArgs, args...)...)
+}
+func (t *ginkgoTestingTProxy) Print(a ...any) {
+ t.writer.Print(a...)
+}
+func (t *ginkgoTestingTProxy) Printf(format string, a ...any) {
+ t.writer.Printf(format, a...)
+}
+func (t *ginkgoTestingTProxy) Println(a ...any) {
+ t.writer.Println(a...)
+}
+func (t *ginkgoTestingTProxy) F(format string, args ...any) string {
+ return t.f.F(format, args...)
+}
+func (t *ginkgoTestingTProxy) Fi(indentation uint, format string, args ...any) string {
+ return t.f.Fi(indentation, format, args...)
+}
+func (t *ginkgoTestingTProxy) Fiw(indentation uint, maxWidth uint, format string, args ...any) string {
+ return t.f.Fiw(indentation, maxWidth, format, args...)
+}
+func (t *ginkgoTestingTProxy) RenderTimeline() string {
+ return reporters.RenderTimeline(t.report(), false)
+}
+func (t *ginkgoTestingTProxy) GinkgoRecover() {
+ t.ginkgoRecover()
+}
+func (t *ginkgoTestingTProxy) DeferCleanup(args ...any) {
+ finalArgs := []any{internal.Offset(1)}
+ t.cleanup(append(finalArgs, args...)...)
+}
+func (t *ginkgoTestingTProxy) RandomSeed() int64 {
+ return t.randomSeed
+}
+func (t *ginkgoTestingTProxy) ParallelProcess() int {
+ return t.parallelProcess
+}
+func (t *ginkgoTestingTProxy) ParallelTotal() int {
+ return t.parallelTotal
+}
+func (t *ginkgoTestingTProxy) AttachProgressReporter(f func() string) func() {
+ return t.attachProgressReporter(f)
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/tree.go b/vendor/github.com/onsi/ginkgo/v2/internal/tree.go
new file mode 100644
index 000000000..f9d1eeb8f
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/tree.go
@@ -0,0 +1,77 @@
+package internal
+
+import "github.com/onsi/ginkgo/v2/types"
+
+type TreeNode struct {
+ Node Node
+ Parent *TreeNode
+ Children TreeNodes
+}
+
+func (tn *TreeNode) AppendChild(child *TreeNode) {
+ tn.Children = append(tn.Children, child)
+ child.Parent = tn
+}
+
+func (tn *TreeNode) AncestorNodeChain() Nodes {
+ if tn.Parent == nil || tn.Parent.Node.IsZero() {
+ return Nodes{tn.Node}
+ }
+ return append(tn.Parent.AncestorNodeChain(), tn.Node)
+}
+
+type TreeNodes []*TreeNode
+
+func (tn TreeNodes) Nodes() Nodes {
+ out := make(Nodes, len(tn))
+ for i := range tn {
+ out[i] = tn[i].Node
+ }
+ return out
+}
+
+func (tn TreeNodes) WithID(id uint) *TreeNode {
+ for i := range tn {
+ if tn[i].Node.ID == id {
+ return tn[i]
+ }
+ }
+
+ return nil
+}
+
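+// GenerateSpecsFromTreeRoot flattens the container tree into one Spec per It
+// leaf. As a sketch, a tree of
+//
+//	Describe("A")
+//	  BeforeEach(B)
+//	  It("X")
+//	  It("Y")
+//
+// yields two specs, [A B X] and [A B Y]: each spec carries the full chain of
+// containers and setup nodes that surround its leaf It.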
+func GenerateSpecsFromTreeRoot(tree *TreeNode) Specs {
+ var walkTree func(nestingLevel int, lNodes Nodes, rNodes Nodes, trees TreeNodes) Specs
+ walkTree = func(nestingLevel int, lNodes Nodes, rNodes Nodes, trees TreeNodes) Specs {
+ tests := Specs{}
+
+ nodes := make(Nodes, len(trees))
+ for i := range trees {
+ nodes[i] = trees[i].Node
+ nodes[i].NestingLevel = nestingLevel
+ }
+
+ for i := range nodes {
+ if !nodes[i].NodeType.Is(types.NodeTypesForContainerAndIt) {
+ continue
+ }
+ leftNodes, rightNodes := nodes.SplitAround(nodes[i])
+ leftNodes = leftNodes.WithoutType(types.NodeTypesForContainerAndIt)
+ rightNodes = rightNodes.WithoutType(types.NodeTypesForContainerAndIt)
+
+ leftNodes = lNodes.CopyAppend(leftNodes...)
+ rightNodes = rightNodes.CopyAppend(rNodes...)
+
+ if nodes[i].NodeType.Is(types.NodeTypeIt) {
+ tests = append(tests, Spec{Nodes: leftNodes.CopyAppend(nodes[i]).CopyAppend(rightNodes...)})
+ } else {
+ treeNode := trees.WithID(nodes[i].ID)
+ tests = append(tests, walkTree(nestingLevel+1, leftNodes.CopyAppend(nodes[i]), rightNodes, treeNode.Children)...)
+ }
+ }
+
+ return tests
+ }
+
+ return walkTree(0, Nodes{}, Nodes{}, tree.Children)
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/writer.go b/vendor/github.com/onsi/ginkgo/v2/internal/writer.go
new file mode 100644
index 000000000..aab42d5fb
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/writer.go
@@ -0,0 +1,144 @@
+package internal
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "sync"
+
+ "github.com/go-logr/logr"
+ "github.com/go-logr/logr/funcr"
+)
+
+type WriterMode uint
+
+const (
+ WriterModeStreamAndBuffer WriterMode = iota
+ WriterModeBufferOnly
+)
+
+type WriterInterface interface {
+ io.Writer
+
+ Truncate()
+ Bytes() []byte
+ Len() int
+}
+
+// Writer implements WriterInterface and GinkgoWriterInterface
+type Writer struct {
+ buffer *bytes.Buffer
+ outWriter io.Writer
+ lock *sync.Mutex
+ mode WriterMode
+
+ streamIndent []byte
+ indentNext bool
+
+ teeWriters []io.Writer
+}
+
+func NewWriter(outWriter io.Writer) *Writer {
+ return &Writer{
+ buffer: &bytes.Buffer{},
+ lock: &sync.Mutex{},
+ outWriter: outWriter,
+ mode: WriterModeStreamAndBuffer,
+ streamIndent: []byte(" "),
+ indentNext: true,
+ }
+}
+
+func (w *Writer) SetMode(mode WriterMode) {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+ w.mode = mode
+}
+
+func (w *Writer) Len() int {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+ return w.buffer.Len()
+}
+
+var newline = []byte("\n")
+
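+// Write copies the bytes to any registered tee writers, streams them to the
+// underlying writer (indenting the start of each line) when in
+// WriterModeStreamAndBuffer, and always appends them to the internal buffer.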
+func (w *Writer) Write(b []byte) (n int, err error) {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+
+ for _, teeWriter := range w.teeWriters {
+ teeWriter.Write(b)
+ }
+
+ if w.mode == WriterModeStreamAndBuffer {
+ line, remaining, found := []byte{}, b, false
+ for len(remaining) > 0 {
+ line, remaining, found = bytes.Cut(remaining, newline)
+ if len(line) > 0 {
+ if w.indentNext {
+ w.outWriter.Write(w.streamIndent)
+ w.indentNext = false
+ }
+ w.outWriter.Write(line)
+ }
+ if found {
+ w.outWriter.Write(newline)
+ w.indentNext = true
+ }
+ }
+ }
+ return w.buffer.Write(b)
+}
+
+func (w *Writer) Truncate() {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+ w.buffer.Reset()
+}
+
+func (w *Writer) Bytes() []byte {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+ b := w.buffer.Bytes()
+ copied := make([]byte, len(b))
+ copy(copied, b)
+ return copied
+}
+
+// GinkgoWriterInterface
+func (w *Writer) TeeTo(writer io.Writer) {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+
+ w.teeWriters = append(w.teeWriters, writer)
+}
+
+func (w *Writer) ClearTeeWriters() {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+
+ w.teeWriters = []io.Writer{}
+}
+
+func (w *Writer) Print(a ...interface{}) {
+ fmt.Fprint(w, a...)
+}
+
+func (w *Writer) Printf(format string, a ...interface{}) {
+ fmt.Fprintf(w, format, a...)
+}
+
+func (w *Writer) Println(a ...interface{}) {
+ fmt.Fprintln(w, a...)
+}
+
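+// GinkgoLogrFunc adapts the Writer into a logr.Logger via funcr, so
+// structured log lines land in the same buffer as other GinkgoWriter output.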
+func GinkgoLogrFunc(writer *Writer) logr.Logger {
+ return funcr.New(func(prefix, args string) {
+ if prefix == "" {
+ writer.Printf("%s\n", args)
+ } else {
+ writer.Printf("%s %s\n", prefix, args)
+ }
+ }, funcr.Options{})
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go
new file mode 100644
index 000000000..480730486
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go
@@ -0,0 +1,788 @@
+/*
+Ginkgo's Default Reporter
+
+A number of command line flags are available to tweak Ginkgo's default output.
+
+These are documented [here](http://onsi.github.io/ginkgo/#running_tests)
+*/
+package reporters
+
+import (
+ "fmt"
+ "io"
+ "runtime"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/onsi/ginkgo/v2/formatter"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type DefaultReporter struct {
+ conf types.ReporterConfig
+ writer io.Writer
+
+ // managing the emission stream
+ lastCharWasNewline bool
+ lastEmissionWasDelimiter bool
+
+ // rendering
+ specDenoter string
+ retryDenoter string
+ formatter formatter.Formatter
+
+ runningInParallel bool
+ lock *sync.Mutex
+}
+
+func NewDefaultReporterUnderTest(conf types.ReporterConfig, writer io.Writer) *DefaultReporter {
+ reporter := NewDefaultReporter(conf, writer)
+ reporter.formatter = formatter.New(formatter.ColorModePassthrough)
+
+ return reporter
+}
+
+func NewDefaultReporter(conf types.ReporterConfig, writer io.Writer) *DefaultReporter {
+ reporter := &DefaultReporter{
+ conf: conf,
+ writer: writer,
+
+ lastCharWasNewline: true,
+ lastEmissionWasDelimiter: false,
+
+ specDenoter: "•",
+ retryDenoter: "↺",
+ formatter: formatter.NewWithNoColorBool(conf.NoColor),
+ lock: &sync.Mutex{},
+ }
+ if runtime.GOOS == "windows" {
+ reporter.specDenoter = "+"
+ reporter.retryDenoter = "R"
+ }
+
+ return reporter
+}
+
+/* The Reporter Interface */
+
+func (r *DefaultReporter) SuiteWillBegin(report types.Report) {
+ if r.conf.Verbosity().Is(types.VerbosityLevelSuccinct) {
+ r.emit(r.f("[%d] {{bold}}%s{{/}} ", report.SuiteConfig.RandomSeed, report.SuiteDescription))
+ if len(report.SuiteLabels) > 0 {
+ r.emit(r.f("{{coral}}[%s]{{/}} ", strings.Join(report.SuiteLabels, ", ")))
+ }
+ r.emit(r.f("- %d/%d specs ", report.PreRunStats.SpecsThatWillRun, report.PreRunStats.TotalSpecs))
+ if report.SuiteConfig.ParallelTotal > 1 {
+ r.emit(r.f("- %d procs ", report.SuiteConfig.ParallelTotal))
+ }
+ } else {
+ banner := r.f("Running Suite: %s - %s", report.SuiteDescription, report.SuitePath)
+ r.emitBlock(banner)
+ bannerWidth := len(banner)
+ if len(report.SuiteLabels) > 0 {
+ labels := strings.Join(report.SuiteLabels, ", ")
+ r.emitBlock(r.f("{{coral}}[%s]{{/}} ", labels))
+ if len(labels)+2 > bannerWidth {
+ bannerWidth = len(labels) + 2
+ }
+ }
+ r.emitBlock(strings.Repeat("=", bannerWidth))
+
+ out := r.f("Random Seed: {{bold}}%d{{/}}", report.SuiteConfig.RandomSeed)
+ if report.SuiteConfig.RandomizeAllSpecs {
+ out += r.f(" - will randomize all specs")
+ }
+ r.emitBlock(out)
+ r.emit("\n")
+ r.emitBlock(r.f("Will run {{bold}}%d{{/}} of {{bold}}%d{{/}} specs", report.PreRunStats.SpecsThatWillRun, report.PreRunStats.TotalSpecs))
+ if report.SuiteConfig.ParallelTotal > 1 {
+ r.emitBlock(r.f("Running in parallel across {{bold}}%d{{/}} processes", report.SuiteConfig.ParallelTotal))
+ }
+ }
+}
+
+func (r *DefaultReporter) SuiteDidEnd(report types.Report) {
+ failures := report.SpecReports.WithState(types.SpecStateFailureStates)
+ if len(failures) > 0 {
+ r.emitBlock("\n")
+ if len(failures) > 1 {
+ r.emitBlock(r.f("{{red}}{{bold}}Summarizing %d Failures:{{/}}", len(failures)))
+ } else {
+ r.emitBlock(r.f("{{red}}{{bold}}Summarizing 1 Failure:{{/}}"))
+ }
+ for _, specReport := range failures {
+ highlightColor, heading := "{{red}}", "[FAIL]"
+ switch specReport.State {
+ case types.SpecStatePanicked:
+ highlightColor, heading = "{{magenta}}", "[PANICKED!]"
+ case types.SpecStateAborted:
+ highlightColor, heading = "{{coral}}", "[ABORTED]"
+ case types.SpecStateTimedout:
+ highlightColor, heading = "{{orange}}", "[TIMEDOUT]"
+ case types.SpecStateInterrupted:
+ highlightColor, heading = "{{orange}}", "[INTERRUPTED]"
+ }
+ locationBlock := r.codeLocationBlock(specReport, highlightColor, false, true)
+ r.emitBlock(r.fi(1, highlightColor+"%s{{/}} %s", heading, locationBlock))
+ }
+ }
+
+ //summarize the suite
+ if r.conf.Verbosity().Is(types.VerbosityLevelSuccinct) && report.SuiteSucceeded {
+ r.emit(r.f(" {{green}}SUCCESS!{{/}} %s ", report.RunTime))
+ return
+ }
+
+ r.emitBlock("\n")
+ color, status := "{{green}}{{bold}}", "SUCCESS!"
+ if !report.SuiteSucceeded {
+ color, status = "{{red}}{{bold}}", "FAIL!"
+ }
+
+ specs := report.SpecReports.WithLeafNodeType(types.NodeTypeIt) //exclude any suite setup nodes
+ r.emitBlock(r.f(color+"Ran %d of %d Specs in %.3f seconds{{/}}",
+ specs.CountWithState(types.SpecStatePassed)+specs.CountWithState(types.SpecStateFailureStates),
+ report.PreRunStats.TotalSpecs,
+ report.RunTime.Seconds()),
+ )
+
+ switch len(report.SpecialSuiteFailureReasons) {
+ case 0:
+ r.emit(r.f(color+"%s{{/}} -- ", status))
+ case 1:
+ r.emit(r.f(color+"%s - %s{{/}} -- ", status, report.SpecialSuiteFailureReasons[0]))
+ default:
+ r.emitBlock(r.f(color+"%s - %s{{/}}\n", status, strings.Join(report.SpecialSuiteFailureReasons, ", ")))
+ }
+
+ if len(specs) == 0 && report.SpecReports.WithLeafNodeType(types.NodeTypeBeforeSuite|types.NodeTypeSynchronizedBeforeSuite).CountWithState(types.SpecStateFailureStates) > 0 {
+ r.emit(r.f("{{cyan}}{{bold}}A BeforeSuite node failed so all tests were skipped.{{/}}\n"))
+ } else {
+ r.emit(r.f("{{green}}{{bold}}%d Passed{{/}} | ", specs.CountWithState(types.SpecStatePassed)))
+ r.emit(r.f("{{red}}{{bold}}%d Failed{{/}} | ", specs.CountWithState(types.SpecStateFailureStates)))
+ if specs.CountOfFlakedSpecs() > 0 {
+ r.emit(r.f("{{light-yellow}}{{bold}}%d Flaked{{/}} | ", specs.CountOfFlakedSpecs()))
+ }
+ if specs.CountOfRepeatedSpecs() > 0 {
+ r.emit(r.f("{{light-yellow}}{{bold}}%d Repeated{{/}} | ", specs.CountOfRepeatedSpecs()))
+ }
+ r.emit(r.f("{{yellow}}{{bold}}%d Pending{{/}} | ", specs.CountWithState(types.SpecStatePending)))
+ r.emit(r.f("{{cyan}}{{bold}}%d Skipped{{/}}\n", specs.CountWithState(types.SpecStateSkipped)))
+ }
+}
+
+func (r *DefaultReporter) WillRun(report types.SpecReport) {
+ v := r.conf.Verbosity()
+ if v.LT(types.VerbosityLevelVerbose) || report.State.Is(types.SpecStatePending|types.SpecStateSkipped) || report.RunningInParallel {
+ return
+ }
+
+ r.emitDelimiter(0)
+ r.emitBlock(r.f(r.codeLocationBlock(report, "{{/}}", v.Is(types.VerbosityLevelVeryVerbose), false)))
+}
+
+func (r *DefaultReporter) wrapTextBlock(sectionName string, fn func()) {
+ r.emitBlock("\n")
+ if r.conf.GithubOutput {
+ r.emitBlock(r.fi(1, "::group::%s", sectionName))
+ } else {
+ r.emitBlock(r.fi(1, "{{gray}}%s >>{{/}}", sectionName))
+ }
+ fn()
+ if r.conf.GithubOutput {
+ r.emitBlock(r.fi(1, "::endgroup::"))
+ } else {
+ r.emitBlock(r.fi(1, "{{gray}}<< %s{{/}}", sectionName))
+ }
+}
+
+func (r *DefaultReporter) DidRun(report types.SpecReport) {
+ v := r.conf.Verbosity()
+ inParallel := report.RunningInParallel
+
+ //should we completely omit this spec?
+ if report.State.Is(types.SpecStateSkipped) && r.conf.SilenceSkips {
+ return
+ }
+
+ header := r.specDenoter
+ if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) {
+ header = fmt.Sprintf("[%s]", report.LeafNodeType)
+ }
+ highlightColor := r.highlightColorForState(report.State)
+
+ // have we already been streaming the timeline?
+ timelineHasBeenStreaming := v.GTE(types.VerbosityLevelVerbose) && !inParallel
+
+ // should we show the timeline?
+ var timeline types.Timeline
+ showTimeline := !timelineHasBeenStreaming && (v.GTE(types.VerbosityLevelVerbose) || report.Failed())
+ if showTimeline {
+ timeline = report.Timeline().WithoutHiddenReportEntries()
+ keepVeryVerboseSpecEvents := v.Is(types.VerbosityLevelVeryVerbose) ||
+ (v.Is(types.VerbosityLevelVerbose) && r.conf.ShowNodeEvents) ||
+ (report.Failed() && r.conf.ShowNodeEvents)
+ if !keepVeryVerboseSpecEvents {
+ timeline = timeline.WithoutVeryVerboseSpecEvents()
+ }
+ if len(timeline) == 0 && report.CapturedGinkgoWriterOutput == "" {
+ // the timeline is completely empty - don't show it
+ showTimeline = false
+ }
+ if v.LT(types.VerbosityLevelVeryVerbose) && report.CapturedGinkgoWriterOutput == "" && len(timeline) > 0 {
+ //if we aren't -vv and the timeline only has a single failure, don't show it as it will appear at the end of the report
+ failure, isFailure := timeline[0].(types.Failure)
+ if isFailure && (len(timeline) == 1 || (len(timeline) == 2 && failure.AdditionalFailure != nil)) {
+ showTimeline = false
+ }
+ }
+ }
+
+ // should we have a separate section for always-visible reports?
+ showSeparateVisibilityAlwaysReportsSection := !timelineHasBeenStreaming && !showTimeline && report.ReportEntries.HasVisibility(types.ReportEntryVisibilityAlways)
+
+ // should we have a separate section for captured stdout/stderr
+ showSeparateStdSection := inParallel && (report.CapturedStdOutErr != "")
+
+ // given all that - do we have any actual content to show? or are we a single denoter in a stream?
+ reportHasContent := v.Is(types.VerbosityLevelVeryVerbose) || showTimeline || showSeparateVisibilityAlwaysReportsSection || showSeparateStdSection || report.Failed() || (v.Is(types.VerbosityLevelVerbose) && !report.State.Is(types.SpecStateSkipped))
+
+ // should we show a runtime?
+ includeRuntime := !report.State.Is(types.SpecStateSkipped|types.SpecStatePending) || (report.State.Is(types.SpecStateSkipped) && report.Failure.Message != "")
+
+ // should we show the codelocation block?
+ showCodeLocation := !timelineHasBeenStreaming || !report.State.Is(types.SpecStatePassed)
+
+ switch report.State {
+ case types.SpecStatePassed:
+ if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) && !reportHasContent {
+ return
+ }
+ if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) {
+ header = fmt.Sprintf("%s PASSED", header)
+ }
+ if report.NumAttempts > 1 && report.MaxFlakeAttempts > 1 {
+ header, reportHasContent = fmt.Sprintf("%s [FLAKEY TEST - TOOK %d ATTEMPTS TO PASS]", r.retryDenoter, report.NumAttempts), true
+ }
+ case types.SpecStatePending:
+ header = "P"
+ if v.GT(types.VerbosityLevelSuccinct) {
+ header, reportHasContent = "P [PENDING]", true
+ }
+ case types.SpecStateSkipped:
+ header = "S"
+ if v.Is(types.VerbosityLevelVeryVerbose) || (v.Is(types.VerbosityLevelVerbose) && report.Failure.Message != "") {
+ header, reportHasContent = "S [SKIPPED]", true
+ }
+ default:
+ header = fmt.Sprintf("%s [%s]", header, r.humanReadableState(report.State))
+ if report.MaxMustPassRepeatedly > 1 {
+ header = fmt.Sprintf("%s DURING REPETITION #%d", header, report.NumAttempts)
+ }
+ }
+
+ // If we have no content to show, just emit the header and return
+ if !reportHasContent {
+ r.emit(r.f(highlightColor + header + "{{/}}"))
+ if r.conf.ForceNewlines {
+ r.emit("\n")
+ }
+ return
+ }
+
+ if includeRuntime {
+ header = r.f("%s [%.3f seconds]", header, report.RunTime.Seconds())
+ }
+
+ // Emit header
+ if !timelineHasBeenStreaming {
+ r.emitDelimiter(0)
+ }
+ r.emitBlock(r.f(highlightColor + header + "{{/}}"))
+ if showCodeLocation {
+ r.emitBlock(r.codeLocationBlock(report, highlightColor, v.Is(types.VerbosityLevelVeryVerbose), false))
+ }
+
+ //Emit Stdout/Stderr Output
+ if showSeparateStdSection {
+ r.wrapTextBlock("Captured StdOut/StdErr Output", func() {
+ r.emitBlock(r.fi(1, "%s", report.CapturedStdOutErr))
+ })
+ }
+
+ if showSeparateVisibilityAlwaysReportsSection {
+ r.wrapTextBlock("Report Entries", func() {
+ for _, entry := range report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways) {
+ r.emitReportEntry(1, entry)
+ }
+ })
+ }
+
+ if showTimeline {
+ r.wrapTextBlock("Timeline", func() {
+ r.emitTimeline(1, report, timeline)
+ })
+ }
+
+ // Emit Failure Message
+ if !report.Failure.IsZero() && !v.Is(types.VerbosityLevelVeryVerbose) {
+ r.emitBlock("\n")
+ r.emitFailure(1, report.State, report.Failure, true)
+ if len(report.AdditionalFailures) > 0 {
+ r.emitBlock(r.fi(1, "\nThere were {{bold}}{{red}}additional failures{{/}} detected. To view them in detail run {{bold}}ginkgo -vv{{/}}"))
+ }
+ }
+
+ r.emitDelimiter(0)
+}
+
+func (r *DefaultReporter) highlightColorForState(state types.SpecState) string {
+ switch state {
+ case types.SpecStatePassed:
+ return "{{green}}"
+ case types.SpecStatePending:
+ return "{{yellow}}"
+ case types.SpecStateSkipped:
+ return "{{cyan}}"
+ case types.SpecStateFailed:
+ return "{{red}}"
+ case types.SpecStateTimedout:
+ return "{{orange}}"
+ case types.SpecStatePanicked:
+ return "{{magenta}}"
+ case types.SpecStateInterrupted:
+ return "{{orange}}"
+ case types.SpecStateAborted:
+ return "{{coral}}"
+ default:
+ return "{{gray}}"
+ }
+}
+
+func (r *DefaultReporter) humanReadableState(state types.SpecState) string {
+ return strings.ToUpper(state.String())
+}
+
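+// emitTimeline interleaves captured GinkgoWriter output with the timeline's
+// entries (failures, report entries, progress reports, and spec events),
+// using each entry's offset into the captured output as a cursor.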
+func (r *DefaultReporter) emitTimeline(indent uint, report types.SpecReport, timeline types.Timeline) {
+ isVeryVerbose := r.conf.Verbosity().Is(types.VerbosityLevelVeryVerbose)
+ gw := report.CapturedGinkgoWriterOutput
+ cursor := 0
+ for _, entry := range timeline {
+ tl := entry.GetTimelineLocation()
+ if tl.Offset < len(gw) {
+ r.emit(r.fi(indent, "%s", gw[cursor:tl.Offset]))
+ cursor = tl.Offset
+ } else if cursor < len(gw) {
+ r.emit(r.fi(indent, "%s", gw[cursor:]))
+ cursor = len(gw)
+ }
+ switch x := entry.(type) {
+ case types.Failure:
+ if isVeryVerbose {
+ r.emitFailure(indent, report.State, x, false)
+ } else {
+ r.emitShortFailure(indent, report.State, x)
+ }
+ case types.AdditionalFailure:
+ if isVeryVerbose {
+ r.emitFailure(indent, x.State, x.Failure, true)
+ } else {
+ r.emitShortFailure(indent, x.State, x.Failure)
+ }
+ case types.ReportEntry:
+ r.emitReportEntry(indent, x)
+ case types.ProgressReport:
+ r.emitProgressReport(indent, false, x)
+ case types.SpecEvent:
+ if isVeryVerbose || !x.IsOnlyVisibleAtVeryVerbose() || r.conf.ShowNodeEvents {
+ r.emitSpecEvent(indent, x, isVeryVerbose)
+ }
+ }
+ }
+ if cursor < len(gw) {
+ r.emit(r.fi(indent, "%s", gw[cursor:]))
+ }
+}
+
+func (r *DefaultReporter) EmitFailure(state types.SpecState, failure types.Failure) {
+ if r.conf.Verbosity().Is(types.VerbosityLevelVerbose) {
+ r.emitShortFailure(1, state, failure)
+ } else if r.conf.Verbosity().Is(types.VerbosityLevelVeryVerbose) {
+ r.emitFailure(1, state, failure, true)
+ }
+}
+
+func (r *DefaultReporter) emitShortFailure(indent uint, state types.SpecState, failure types.Failure) {
+ r.emitBlock(r.fi(indent, r.highlightColorForState(state)+"[%s]{{/}} in [%s] - %s {{gray}}@ %s{{/}}",
+ r.humanReadableState(state),
+ failure.FailureNodeType,
+ failure.Location,
+ failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT),
+ ))
+}
+
+func (r *DefaultReporter) emitFailure(indent uint, state types.SpecState, failure types.Failure, includeAdditionalFailure bool) {
+ highlightColor := r.highlightColorForState(state)
+ r.emitBlock(r.fi(indent, highlightColor+"[%s] %s{{/}}", r.humanReadableState(state), failure.Message))
+ if r.conf.GithubOutput {
+ level := "error"
+ if state.Is(types.SpecStateSkipped) {
+ level = "notice"
+ }
+ r.emitBlock(r.fi(indent, "::%s file=%s,line=%d::%s %s", level, failure.Location.FileName, failure.Location.LineNumber, failure.FailureNodeType, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT)))
+ } else {
+ r.emitBlock(r.fi(indent, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: {{bold}}%s{{/}} {{gray}}@ %s{{/}}\n", failure.FailureNodeType, failure.Location, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT)))
+ }
+ if failure.ForwardedPanic != "" {
+ r.emitBlock("\n")
+ r.emitBlock(r.fi(indent, highlightColor+"%s{{/}}", failure.ForwardedPanic))
+ }
+
+ if r.conf.FullTrace || failure.ForwardedPanic != "" {
+ r.emitBlock("\n")
+ r.emitBlock(r.fi(indent, highlightColor+"Full Stack Trace{{/}}"))
+ r.emitBlock(r.fi(indent+1, "%s", failure.Location.FullStackTrace))
+ }
+
+ if !failure.ProgressReport.IsZero() {
+ r.emitBlock("\n")
+ r.emitProgressReport(indent, false, failure.ProgressReport)
+ }
+
+ if failure.AdditionalFailure != nil && includeAdditionalFailure {
+ r.emitBlock("\n")
+ r.emitFailure(indent, failure.AdditionalFailure.State, failure.AdditionalFailure.Failure, true)
+ }
+}
+
+func (r *DefaultReporter) EmitProgressReport(report types.ProgressReport) {
+ r.emitDelimiter(1)
+
+ if report.RunningInParallel {
+ r.emit(r.fi(1, "{{coral}}Progress Report for Ginkgo Process #{{bold}}%d{{/}}\n", report.ParallelProcess))
+ }
+ shouldEmitGW := report.RunningInParallel || r.conf.Verbosity().LT(types.VerbosityLevelVerbose)
+ r.emitProgressReport(1, shouldEmitGW, report)
+ r.emitDelimiter(1)
+}
+
+func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput bool, report types.ProgressReport) {
+ if report.Message != "" {
+ r.emitBlock(r.fi(indent, report.Message+"\n"))
+ indent += 1
+ }
+ if report.LeafNodeText != "" {
+ subjectIndent := indent
+ if len(report.ContainerHierarchyTexts) > 0 {
+ r.emit(r.fi(indent, r.cycleJoin(report.ContainerHierarchyTexts, " ")))
+ r.emit(" ")
+ subjectIndent = 0
+ }
+ r.emit(r.fi(subjectIndent, "{{bold}}{{orange}}%s{{/}} (Spec Runtime: %s)\n", report.LeafNodeText, report.Time().Sub(report.SpecStartTime).Round(time.Millisecond)))
+ r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", report.LeafNodeLocation))
+ indent += 1
+ }
+ if report.CurrentNodeType != types.NodeTypeInvalid {
+ r.emit(r.fi(indent, "In {{bold}}{{orange}}[%s]{{/}}", report.CurrentNodeType))
+ if report.CurrentNodeText != "" && !report.CurrentNodeType.Is(types.NodeTypeIt) {
+ r.emit(r.f(" {{bold}}{{orange}}%s{{/}}", report.CurrentNodeText))
+ }
+
+ r.emit(r.f(" (Node Runtime: %s)\n", report.Time().Sub(report.CurrentNodeStartTime).Round(time.Millisecond)))
+ r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", report.CurrentNodeLocation))
+ indent += 1
+ }
+ if report.CurrentStepText != "" {
+ r.emit(r.fi(indent, "At {{bold}}{{orange}}[By Step] %s{{/}} (Step Runtime: %s)\n", report.CurrentStepText, report.Time().Sub(report.CurrentStepStartTime).Round(time.Millisecond)))
+ r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", report.CurrentStepLocation))
+ indent += 1
+ }
+
+ if indent > 0 {
+ indent -= 1
+ }
+
+ if emitGinkgoWriterOutput && report.CapturedGinkgoWriterOutput != "" {
+ r.emit("\n")
+ r.emitBlock(r.fi(indent, "{{gray}}Begin Captured GinkgoWriter Output >>{{/}}"))
+ limit, lines := 10, strings.Split(report.CapturedGinkgoWriterOutput, "\n")
+ if len(lines) <= limit {
+ r.emitBlock(r.fi(indent+1, "%s", report.CapturedGinkgoWriterOutput))
+ } else {
+ r.emitBlock(r.fi(indent+1, "{{gray}}...{{/}}"))
+ for _, line := range lines[len(lines)-limit-1:] {
+ r.emitBlock(r.fi(indent+1, "%s", line))
+ }
+ }
+ r.emitBlock(r.fi(indent, "{{gray}}<< End Captured GinkgoWriter Output{{/}}"))
+ }
+
+ if !report.SpecGoroutine().IsZero() {
+ r.emit("\n")
+ r.emit(r.fi(indent, "{{bold}}{{underline}}Spec Goroutine{{/}}\n"))
+ r.emitGoroutines(indent, report.SpecGoroutine())
+ }
+
+ if len(report.AdditionalReports) > 0 {
+ r.emit("\n")
+ r.emitBlock(r.fi(indent, "{{gray}}Begin Additional Progress Reports >>{{/}}"))
+ for i, additionalReport := range report.AdditionalReports {
+ r.emit(r.fi(indent+1, additionalReport))
+ if i < len(report.AdditionalReports)-1 {
+ r.emitBlock(r.fi(indent+1, "{{gray}}%s{{/}}", strings.Repeat("-", 10)))
+ }
+ }
+ r.emitBlock(r.fi(indent, "{{gray}}<< End Additional Progress Reports{{/}}"))
+ }
+
+ highlightedGoroutines := report.HighlightedGoroutines()
+ if len(highlightedGoroutines) > 0 {
+ r.emit("\n")
+ r.emit(r.fi(indent, "{{bold}}{{underline}}Goroutines of Interest{{/}}\n"))
+ r.emitGoroutines(indent, highlightedGoroutines...)
+ }
+
+ otherGoroutines := report.OtherGoroutines()
+ if len(otherGoroutines) > 0 {
+ r.emit("\n")
+ r.emit(r.fi(indent, "{{gray}}{{bold}}{{underline}}Other Goroutines{{/}}\n"))
+ r.emitGoroutines(indent, otherGoroutines...)
+ }
+}
+
+func (r *DefaultReporter) EmitReportEntry(entry types.ReportEntry) {
+ if r.conf.Verbosity().LT(types.VerbosityLevelVerbose) || entry.Visibility == types.ReportEntryVisibilityNever {
+ return
+ }
+ r.emitReportEntry(1, entry)
+}
+
+func (r *DefaultReporter) emitReportEntry(indent uint, entry types.ReportEntry) {
+ r.emitBlock(r.fi(indent, "{{bold}}"+entry.Name+"{{gray}} "+fmt.Sprintf("- %s @ %s{{/}}", entry.Location, entry.Time.Format(types.GINKGO_TIME_FORMAT))))
+ if representation := entry.StringRepresentation(); representation != "" {
+ r.emitBlock(r.fi(indent+1, representation))
+ }
+}
+
+func (r *DefaultReporter) EmitSpecEvent(event types.SpecEvent) {
+ v := r.conf.Verbosity()
+ if v.Is(types.VerbosityLevelVeryVerbose) || (v.Is(types.VerbosityLevelVerbose) && (r.conf.ShowNodeEvents || !event.IsOnlyVisibleAtVeryVerbose())) {
+ r.emitSpecEvent(1, event, r.conf.Verbosity().Is(types.VerbosityLevelVeryVerbose))
+ }
+}
+
+func (r *DefaultReporter) emitSpecEvent(indent uint, event types.SpecEvent, includeLocation bool) {
+ location := ""
+ if includeLocation {
+ location = fmt.Sprintf("- %s ", event.CodeLocation.String())
+ }
+ switch event.SpecEventType {
+ case types.SpecEventInvalid:
+ return
+ case types.SpecEventByStart:
+ r.emitBlock(r.fi(indent, "{{bold}}STEP:{{/}} %s {{gray}}%s@ %s{{/}}", event.Message, location, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT)))
+ case types.SpecEventByEnd:
+ r.emitBlock(r.fi(indent, "{{bold}}END STEP:{{/}} %s {{gray}}%s@ %s (%s){{/}}", event.Message, location, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT), event.Duration.Round(time.Millisecond)))
+ case types.SpecEventNodeStart:
+ r.emitBlock(r.fi(indent, "> Enter {{bold}}[%s]{{/}} %s {{gray}}%s@ %s{{/}}", event.NodeType.String(), event.Message, location, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT)))
+ case types.SpecEventNodeEnd:
+ r.emitBlock(r.fi(indent, "< Exit {{bold}}[%s]{{/}} %s {{gray}}%s@ %s (%s){{/}}", event.NodeType.String(), event.Message, location, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT), event.Duration.Round(time.Millisecond)))
+ case types.SpecEventSpecRepeat:
+ r.emitBlock(r.fi(indent, "\n{{bold}}Attempt #%d {{green}}Passed{{/}}{{bold}}. Repeating %s{{/}} {{gray}}@ %s{{/}}\n\n", event.Attempt, r.retryDenoter, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT)))
+ case types.SpecEventSpecRetry:
+ r.emitBlock(r.fi(indent, "\n{{bold}}Attempt #%d {{red}}Failed{{/}}{{bold}}. Retrying %s{{/}} {{gray}}@ %s{{/}}\n\n", event.Attempt, r.retryDenoter, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT)))
+ }
+}
+
+func (r *DefaultReporter) emitGoroutines(indent uint, goroutines ...types.Goroutine) {
+ for idx, g := range goroutines {
+ color := "{{gray}}"
+ if g.HasHighlights() {
+ color = "{{orange}}"
+ }
+ r.emit(r.fi(indent, color+"goroutine %d [%s]{{/}}\n", g.ID, g.State))
+ for _, fc := range g.Stack {
+ if fc.Highlight {
+ r.emit(r.fi(indent, color+"{{bold}}> %s{{/}}\n", fc.Function))
+ r.emit(r.fi(indent+2, color+"{{bold}}%s:%d{{/}}\n", fc.Filename, fc.Line))
+ r.emitSource(indent+3, fc)
+ } else {
+ r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", fc.Function))
+ r.emit(r.fi(indent+2, "{{gray}}%s:%d{{/}}\n", fc.Filename, fc.Line))
+ }
+ }
+
+ if idx+1 < len(goroutines) {
+ r.emit("\n")
+ }
+ }
+}
+
+func (r *DefaultReporter) emitSource(indent uint, fc types.FunctionCall) {
+ lines := fc.Source
+ if len(lines) == 0 {
+ return
+ }
+
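+	// Compute the smallest leading-whitespace width across the source lines so
+	// the snippet can be dedented uniformly; 100000 is just an "unset" sentinel.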
+ lTrim := 100000
+ for _, line := range lines {
+ lTrimLine := len(line) - len(strings.TrimLeft(line, " \t"))
+ if lTrimLine < lTrim && len(line) > 0 {
+ lTrim = lTrimLine
+ }
+ }
+ if lTrim == 100000 {
+ lTrim = 0
+ }
+
+ for idx, line := range lines {
+ if len(line) > lTrim {
+ line = line[lTrim:]
+ }
+ if idx == fc.SourceHighlight {
+ r.emit(r.fi(indent, "{{bold}}{{orange}}> %s{{/}}\n", line))
+ } else {
+ r.emit(r.fi(indent, "| %s\n", line))
+ }
+ }
+}
+
+/* Emitting to the writer */
+func (r *DefaultReporter) emit(s string) {
+ r._emit(s, false, false)
+}
+
+func (r *DefaultReporter) emitBlock(s string) {
+ r._emit(s, true, false)
+}
+
+func (r *DefaultReporter) emitDelimiter(indent uint) {
+ r._emit(r.fi(indent, "{{gray}}%s{{/}}", strings.Repeat("-", 30)), true, true)
+}
+
+// a bit ugly - but we're trying to minimize locking on this hot codepath
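+// _emit tracks whether the last character written was a newline so that block
+// emissions stay newline-delimited, and collapses consecutive delimiters.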
+func (r *DefaultReporter) _emit(s string, block bool, isDelimiter bool) {
+ if len(s) == 0 {
+ return
+ }
+ r.lock.Lock()
+ defer r.lock.Unlock()
+ if isDelimiter && r.lastEmissionWasDelimiter {
+ return
+ }
+ if block && !r.lastCharWasNewline {
+ r.writer.Write([]byte("\n"))
+ }
+ r.lastCharWasNewline = (s[len(s)-1:] == "\n")
+ r.writer.Write([]byte(s))
+ if block && !r.lastCharWasNewline {
+ r.writer.Write([]byte("\n"))
+ r.lastCharWasNewline = true
+ }
+ r.lastEmissionWasDelimiter = isDelimiter
+}
+
+/* Rendering text */
+func (r *DefaultReporter) f(format string, args ...interface{}) string {
+ return r.formatter.F(format, args...)
+}
+
+func (r *DefaultReporter) fi(indentation uint, format string, args ...interface{}) string {
+ return r.formatter.Fi(indentation, format, args...)
+}
+
+func (r *DefaultReporter) cycleJoin(elements []string, joiner string) string {
+ return r.formatter.CycleJoin(elements, joiner, []string{"{{/}}", "{{gray}}"})
+}
+
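+// codeLocationBlock renders the spec's container/leaf hierarchy, highlighting
+// the node in which the failure occurred. In veryVerbose mode each level gets
+// its own indented line plus its code location; otherwise the texts are
+// joined on a single line with alternating styles.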
+func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightColor string, veryVerbose bool, usePreciseFailureLocation bool) string {
+ texts, locations, labels := []string{}, []types.CodeLocation{}, [][]string{}
+ texts, locations, labels = append(texts, report.ContainerHierarchyTexts...), append(locations, report.ContainerHierarchyLocations...), append(labels, report.ContainerHierarchyLabels...)
+
+ if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) {
+ texts = append(texts, r.f("[%s] %s", report.LeafNodeType, report.LeafNodeText))
+ } else {
+ texts = append(texts, r.f(report.LeafNodeText))
+ }
+ labels = append(labels, report.LeafNodeLabels)
+ locations = append(locations, report.LeafNodeLocation)
+
+ failureLocation := report.Failure.FailureNodeLocation
+ if usePreciseFailureLocation {
+ failureLocation = report.Failure.Location
+ }
+
+ highlightIndex := -1
+ switch report.Failure.FailureNodeContext {
+ case types.FailureNodeAtTopLevel:
+ texts = append([]string{fmt.Sprintf("TOP-LEVEL [%s]", report.Failure.FailureNodeType)}, texts...)
+ locations = append([]types.CodeLocation{failureLocation}, locations...)
+ labels = append([][]string{{}}, labels...)
+ highlightIndex = 0
+ case types.FailureNodeInContainer:
+ i := report.Failure.FailureNodeContainerIndex
+ texts[i] = fmt.Sprintf("%s [%s]", texts[i], report.Failure.FailureNodeType)
+ locations[i] = failureLocation
+ highlightIndex = i
+ case types.FailureNodeIsLeafNode:
+ i := len(texts) - 1
+ texts[i] = fmt.Sprintf("[%s] %s", report.LeafNodeType, report.LeafNodeText)
+ locations[i] = failureLocation
+ highlightIndex = i
+ default:
+		//there is no failure, so we highlight the leaf node
+ highlightIndex = len(texts) - 1
+ }
+
+ out := ""
+ if veryVerbose {
+ for i := range texts {
+ if i == highlightIndex {
+ out += r.fi(uint(i), highlightColor+"{{bold}}%s{{/}}", texts[i])
+ } else {
+ out += r.fi(uint(i), "%s", texts[i])
+ }
+ if len(labels[i]) > 0 {
+ out += r.f(" {{coral}}[%s]{{/}}", strings.Join(labels[i], ", "))
+ }
+ out += "\n"
+ out += r.fi(uint(i), "{{gray}}%s{{/}}\n", locations[i])
+ }
+ } else {
+ for i := range texts {
+ style := "{{/}}"
+ if i%2 == 1 {
+ style = "{{gray}}"
+ }
+ if i == highlightIndex {
+ style = highlightColor + "{{bold}}"
+ }
+ out += r.f(style+"%s", texts[i])
+ if i < len(texts)-1 {
+ out += " "
+ } else {
+ out += r.f("{{/}}")
+ }
+ }
+ flattenedLabels := report.Labels()
+ if len(flattenedLabels) > 0 {
+ out += r.f(" {{coral}}[%s]{{/}}", strings.Join(flattenedLabels, ", "))
+ }
+ out += "\n"
+ if usePreciseFailureLocation {
+ out += r.f("{{gray}}%s{{/}}", failureLocation)
+ } else {
+ leafLocation := locations[len(locations)-1]
+ if (report.Failure.FailureNodeLocation != types.CodeLocation{}) && (report.Failure.FailureNodeLocation != leafLocation) {
+ out += r.fi(1, highlightColor+"[%s]{{/}} {{gray}}%s{{/}}\n", report.Failure.FailureNodeType, report.Failure.FailureNodeLocation)
+ out += r.fi(1, "{{gray}}[%s] %s{{/}}", report.LeafNodeType, leafLocation)
+ } else {
+ out += r.f("{{gray}}%s{{/}}", leafLocation)
+ }
+ }
+
+ }
+ return out
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go b/vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go
new file mode 100644
index 000000000..613072ebf
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go
@@ -0,0 +1,149 @@
+package reporters
+
+import (
+ "github.com/onsi/ginkgo/v2/config"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+// Deprecated: DeprecatedReporter was how Ginkgo V1 provided support for CustomReporters
+// this has been removed in V2.
+// Please read the documentation at:
+// https://onsi.github.io/ginkgo/MIGRATING_TO_V2#removed-custom-reporters
+// for Ginkgo's new behavior and for a migration path.
+type DeprecatedReporter interface {
+ SuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary)
+ BeforeSuiteDidRun(setupSummary *types.SetupSummary)
+ SpecWillRun(specSummary *types.SpecSummary)
+ SpecDidComplete(specSummary *types.SpecSummary)
+ AfterSuiteDidRun(setupSummary *types.SetupSummary)
+ SuiteDidEnd(summary *types.SuiteSummary)
+}
+
+// ReportViaDeprecatedReporter takes a V1 custom reporter and a V2 report and
+// calls the custom reporter's methods with appropriately transformed data from the V2 report.
+//
+// ReportViaDeprecatedReporter should be called in a `ReportAfterSuite()`
+//
+// Deprecated: ReportViaDeprecatedReporter exists to help developers bridge between deprecated V1 functionality and the new
+// reporting support in V2. It will be removed in a future minor version of Ginkgo.
+func ReportViaDeprecatedReporter(reporter DeprecatedReporter, report types.Report) {
+ conf := config.DeprecatedGinkgoConfigType{
+ RandomSeed: report.SuiteConfig.RandomSeed,
+ RandomizeAllSpecs: report.SuiteConfig.RandomizeAllSpecs,
+ FocusStrings: report.SuiteConfig.FocusStrings,
+ SkipStrings: report.SuiteConfig.SkipStrings,
+ FailOnPending: report.SuiteConfig.FailOnPending,
+ FailFast: report.SuiteConfig.FailFast,
+ FlakeAttempts: report.SuiteConfig.FlakeAttempts,
+ EmitSpecProgress: false,
+ DryRun: report.SuiteConfig.DryRun,
+ ParallelNode: report.SuiteConfig.ParallelProcess,
+ ParallelTotal: report.SuiteConfig.ParallelTotal,
+ SyncHost: report.SuiteConfig.ParallelHost,
+ StreamHost: report.SuiteConfig.ParallelHost,
+ }
+
+ summary := &types.DeprecatedSuiteSummary{
+ SuiteDescription: report.SuiteDescription,
+ SuiteID: report.SuitePath,
+
+ NumberOfSpecsBeforeParallelization: report.PreRunStats.TotalSpecs,
+ NumberOfTotalSpecs: report.PreRunStats.TotalSpecs,
+ NumberOfSpecsThatWillBeRun: report.PreRunStats.SpecsThatWillRun,
+ }
+
+ reporter.SuiteWillBegin(conf, summary)
+
+ for _, spec := range report.SpecReports {
+ switch spec.LeafNodeType {
+ case types.NodeTypeBeforeSuite, types.NodeTypeSynchronizedBeforeSuite:
+ setupSummary := &types.DeprecatedSetupSummary{
+ ComponentType: spec.LeafNodeType,
+ CodeLocation: spec.LeafNodeLocation,
+ State: spec.State,
+ RunTime: spec.RunTime,
+ Failure: failureFor(spec),
+ CapturedOutput: spec.CombinedOutput(),
+ SuiteID: report.SuitePath,
+ }
+ reporter.BeforeSuiteDidRun(setupSummary)
+ case types.NodeTypeAfterSuite, types.NodeTypeSynchronizedAfterSuite:
+ setupSummary := &types.DeprecatedSetupSummary{
+ ComponentType: spec.LeafNodeType,
+ CodeLocation: spec.LeafNodeLocation,
+ State: spec.State,
+ RunTime: spec.RunTime,
+ Failure: failureFor(spec),
+ CapturedOutput: spec.CombinedOutput(),
+ SuiteID: report.SuitePath,
+ }
+ reporter.AfterSuiteDidRun(setupSummary)
+ case types.NodeTypeIt:
+ componentTexts, componentCodeLocations := []string{}, []types.CodeLocation{}
+ componentTexts = append(componentTexts, spec.ContainerHierarchyTexts...)
+ componentCodeLocations = append(componentCodeLocations, spec.ContainerHierarchyLocations...)
+ componentTexts = append(componentTexts, spec.LeafNodeText)
+ componentCodeLocations = append(componentCodeLocations, spec.LeafNodeLocation)
+
+ specSummary := &types.DeprecatedSpecSummary{
+ ComponentTexts: componentTexts,
+ ComponentCodeLocations: componentCodeLocations,
+ State: spec.State,
+ RunTime: spec.RunTime,
+ Failure: failureFor(spec),
+ NumberOfSamples: spec.NumAttempts,
+ CapturedOutput: spec.CombinedOutput(),
+ SuiteID: report.SuitePath,
+ }
+ reporter.SpecWillRun(specSummary)
+ reporter.SpecDidComplete(specSummary)
+
+ switch spec.State {
+ case types.SpecStatePending:
+ summary.NumberOfPendingSpecs += 1
+ case types.SpecStateSkipped:
+ summary.NumberOfSkippedSpecs += 1
+ case types.SpecStateFailed, types.SpecStatePanicked, types.SpecStateInterrupted:
+ summary.NumberOfFailedSpecs += 1
+ case types.SpecStatePassed:
+ summary.NumberOfPassedSpecs += 1
+ if spec.NumAttempts > 1 {
+ summary.NumberOfFlakedSpecs += 1
+ }
+ }
+ }
+ }
+
+ summary.SuiteSucceeded = report.SuiteSucceeded
+ summary.RunTime = report.RunTime
+
+ reporter.SuiteDidEnd(summary)
+}
+
+func failureFor(spec types.SpecReport) types.DeprecatedSpecFailure {
+ if spec.Failure.IsZero() {
+ return types.DeprecatedSpecFailure{}
+ }
+
+ index := 0
+ switch spec.Failure.FailureNodeContext {
+ case types.FailureNodeInContainer:
+ index = spec.Failure.FailureNodeContainerIndex
+ case types.FailureNodeAtTopLevel:
+ index = -1
+ case types.FailureNodeIsLeafNode:
+ index = len(spec.ContainerHierarchyTexts) - 1
+ if spec.LeafNodeText != "" {
+ index += 1
+ }
+ }
+
+ return types.DeprecatedSpecFailure{
+ Message: spec.Failure.Message,
+ Location: spec.Failure.Location,
+ ForwardedPanic: spec.Failure.ForwardedPanic,
+ ComponentIndex: index,
+ ComponentType: spec.Failure.FailureNodeType,
+ ComponentCodeLocation: spec.Failure.FailureNodeLocation,
+ }
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go
new file mode 100644
index 000000000..5d3e8db99
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go
@@ -0,0 +1,69 @@
+package reporters
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+ "path"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+// GenerateJSONReport produces a JSON-formatted report at the passed in destination
+func GenerateJSONReport(report types.Report, destination string) error {
+ if err := os.MkdirAll(path.Dir(destination), 0770); err != nil {
+ return err
+ }
+ f, err := os.Create(destination)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ enc := json.NewEncoder(f)
+ enc.SetIndent("", " ")
+ err = enc.Encode([]types.Report{
+ report,
+ })
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// MergeAndCleanupJSONReports produces a single JSON-formatted report at the passed in destination by merging the JSON-formatted reports provided in sources, removing each source file once it has been merged
+// It skips over reports that fail to decode but reports them via the returned messages []string
+func MergeAndCleanupJSONReports(sources []string, destination string) ([]string, error) {
+ messages := []string{}
+ allReports := []types.Report{}
+ for _, source := range sources {
+ reports := []types.Report{}
+ data, err := os.ReadFile(source)
+ if err != nil {
+ messages = append(messages, fmt.Sprintf("Could not open %s:\n%s", source, err.Error()))
+ continue
+ }
+ err = json.Unmarshal(data, &reports)
+ if err != nil {
+ messages = append(messages, fmt.Sprintf("Could not decode %s:\n%s", source, err.Error()))
+ continue
+ }
+ os.Remove(source)
+ allReports = append(allReports, reports...)
+ }
+
+ if err := os.MkdirAll(path.Dir(destination), 0770); err != nil {
+ return messages, err
+ }
+ f, err := os.Create(destination)
+ if err != nil {
+ return messages, err
+ }
+ defer f.Close()
+ enc := json.NewEncoder(f)
+ enc.SetIndent("", " ")
+ err = enc.Encode(allReports)
+ if err != nil {
+ return messages, err
+ }
+ return messages, nil
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go
new file mode 100644
index 000000000..562e0f62b
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go
@@ -0,0 +1,390 @@
+/*
+
+JUnit XML Reporter for Ginkgo
+
+For usage instructions: http://onsi.github.io/ginkgo/#generating_junit_xml_output
+
+The schema used for the generated JUnit xml file was adapted from https://llg.cubic.org/docs/junit/
+
+*/
+
+package reporters
+
+import (
+ "encoding/xml"
+ "fmt"
+ "os"
+ "path"
+ "regexp"
+ "strings"
+
+ "github.com/onsi/ginkgo/v2/config"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type JunitReportConfig struct {
+ // SpecStates for which no timeline should be emitted to system-err.
+ // Set this to types.SpecStatePassed|types.SpecStateSkipped|types.SpecStatePending to emit timelines only for failing specs.
+ OmitTimelinesForSpecState types.SpecState
+
+ // Enable OmitFailureMessageAttr to prevent failure messages from appearing in the "message" attribute of the Failure and Error tags
+ OmitFailureMessageAttr bool
+
+ // Enable OmitCapturedStdOutErr to prevent captured stdout/stderr from appearing in system-out
+ OmitCapturedStdOutErr bool
+
+ // Enable OmitSpecLabels to prevent labels from appearing in the spec name
+ OmitSpecLabels bool
+
+ // Enable OmitLeafNodeType to prevent the spec leaf node type from appearing in the spec name
+ OmitLeafNodeType bool
+
+ // Enable OmitSuiteSetupNodes to prevent the creation of testcase entries for setup nodes
+ OmitSuiteSetupNodes bool
+}
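+
+// Editor's note - an illustrative sketch, not part of upstream Ginkgo:
+// emitting a trimmed JUnit file whose timelines cover only failing specs
+// (the output path is an assumption):
+//
+//	var _ = ReportAfterSuite("junit report", func(r Report) {
+//		conf := reporters.JunitReportConfig{
+//			OmitTimelinesForSpecState: types.SpecStatePassed | types.SpecStateSkipped | types.SpecStatePending,
+//			OmitCapturedStdOutErr:     true,
+//		}
+//		_ = reporters.GenerateJUnitReportWithConfig(r, "artifacts/junit.xml", conf)
+//	})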
+
+type JUnitTestSuites struct {
+ XMLName xml.Name `xml:"testsuites"`
+ // Tests maps onto the total number of specs in all test suites (this includes any suite nodes such as BeforeSuite)
+ Tests int `xml:"tests,attr"`
+ // Disabled maps onto specs that are pending and/or skipped
+ Disabled int `xml:"disabled,attr"`
+ // Errors maps onto specs that panicked or were interrupted
+ Errors int `xml:"errors,attr"`
+ // Failures maps onto specs that failed
+ Failures int `xml:"failures,attr"`
+ // Time is the time in seconds to execute all test suites
+ Time float64 `xml:"time,attr"`
+
+ //The set of all test suites
+ TestSuites []JUnitTestSuite `xml:"testsuite"`
+}
+
+type JUnitTestSuite struct {
+ // Name maps onto the description of the test suite - maps onto Report.SuiteDescription
+ Name string `xml:"name,attr"`
+ // Package maps onto the absolute path to the test suite - maps onto Report.SuitePath
+ Package string `xml:"package,attr"`
+ // Tests maps onto the total number of specs in the test suite (this includes any suite nodes such as BeforeSuite)
+ Tests int `xml:"tests,attr"`
+ // Disabled maps onto specs that are pending
+ Disabled int `xml:"disabled,attr"`
+ // Skipped maps onto specs that are skipped
+ Skipped int `xml:"skipped,attr"`
+ // Errors maps onto specs that panicked or were interrupted
+ Errors int `xml:"errors,attr"`
+ // Failures maps onto specs that failed
+ Failures int `xml:"failures,attr"`
+ // Time is the time in seconds to execute the test suite - maps onto Report.RunTime
+ Time float64 `xml:"time,attr"`
+ // Timestamp is the ISO 8601 formatted start-time of the suite - maps onto Report.StartTime
+ Timestamp string `xml:"timestamp,attr"`
+
+ //Properties captures the information stored in the rest of the Report type (including SuiteConfig) as key-value pairs
+ Properties JUnitProperties `xml:"properties"`
+
+ //TestCases capture the individual specs
+ TestCases []JUnitTestCase `xml:"testcase"`
+}
+
+type JUnitProperties struct {
+ Properties []JUnitProperty `xml:"property"`
+}
+
+func (jup JUnitProperties) WithName(name string) string {
+ for _, property := range jup.Properties {
+ if property.Name == name {
+ return property.Value
+ }
+ }
+ return ""
+}
+
+type JUnitProperty struct {
+ Name string `xml:"name,attr"`
+ Value string `xml:"value,attr"`
+}
+
+var ownerRE = regexp.MustCompile(`(?i)^owner:(.*)$`)
+
+type JUnitTestCase struct {
+ // Name maps onto the full text of the spec - equivalent to "[SpecReport.LeafNodeType] SpecReport.FullText()"
+ Name string `xml:"name,attr"`
+ // Classname maps onto the name of the test suite - equivalent to Report.SuiteDescription
+ Classname string `xml:"classname,attr"`
+ // Status maps onto the string representation of SpecReport.State
+ Status string `xml:"status,attr"`
+ // Time is the time in seconds to execute the spec - maps onto SpecReport.RunTime
+ Time float64 `xml:"time,attr"`
+ // Owner is the owner of the spec - set if a label matching Label("owner:X") is provided. The last matching label is used as the owner, thereby allowing specs to override owners specified in container nodes.
+ Owner string `xml:"owner,attr,omitempty"`
+ //Skipped is populated with a message if the test was skipped or pending
+ Skipped *JUnitSkipped `xml:"skipped,omitempty"`
+ //Error is populated if the test panicked or was interrupted
+ Error *JUnitError `xml:"error,omitempty"`
+ //Failure is populated if the test failed
+ Failure *JUnitFailure `xml:"failure,omitempty"`
+ // SystemOut maps onto any captured stdout/stderr output - maps onto SpecReport.CapturedStdOutErr
+ SystemOut string `xml:"system-out,omitempty"`
+ // SystemErr maps onto any captured GinkgoWriter output - maps onto SpecReport.CapturedGinkgoWriterOutput
+ SystemErr string `xml:"system-err,omitempty"`
+}
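+
+// Editor's note - an illustrative sketch, not part of upstream Ginkgo: the
+// Owner attribute is derived from "owner:" labels, with the last matching
+// label winning, so leaf nodes can override containers (team names below are
+// assumptions):
+//
+//	Describe("networking", Label("owner:sig-network"), func() {
+//		It("routes packets", func() {})                          // owner: sig-network
+//		It("audits routes", Label("owner:sig-audit"), func() {}) // owner: sig-audit
+//	})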
+
+type JUnitSkipped struct {
+ // Message maps onto "pending" if the test was marked pending, "skipped" if the test was marked skipped, and "skipped - REASON" if the user called Skip(REASON)
+ Message string `xml:"message,attr"`
+}
+
+type JUnitError struct {
+ //Message maps onto the panic/exception thrown - equivalent to SpecReport.Failure.ForwardedPanic - or to "interrupted"
+ Message string `xml:"message,attr"`
+ //Type is one of "panicked" or "interrupted"
+ Type string `xml:"type,attr"`
+ //Description maps onto the captured stack trace for a panic, or the failure message for an interrupt which will include the dump of running goroutines
+ Description string `xml:",chardata"`
+}
+
+type JUnitFailure struct {
+ //Message maps onto the failure message - equivalent to SpecReport.Failure.Message
+ Message string `xml:"message,attr"`
+ //Type is "failed"
+ Type string `xml:"type,attr"`
+ //Description maps onto the location and stack trace of the failure
+ Description string `xml:",chardata"`
+}
+
+func GenerateJUnitReport(report types.Report, dst string) error {
+ return GenerateJUnitReportWithConfig(report, dst, JunitReportConfig{})
+}
+
+func GenerateJUnitReportWithConfig(report types.Report, dst string, config JunitReportConfig) error {
+ suite := JUnitTestSuite{
+ Name: report.SuiteDescription,
+ Package: report.SuitePath,
+ Time: report.RunTime.Seconds(),
+ Timestamp: report.StartTime.Format("2006-01-02T15:04:05"),
+ Properties: JUnitProperties{
+ Properties: []JUnitProperty{
+ {"SuiteSucceeded", fmt.Sprintf("%t", report.SuiteSucceeded)},
+ {"SuiteHasProgrammaticFocus", fmt.Sprintf("%t", report.SuiteHasProgrammaticFocus)},
+ {"SpecialSuiteFailureReason", strings.Join(report.SpecialSuiteFailureReasons, ",")},
+ {"SuiteLabels", fmt.Sprintf("[%s]", strings.Join(report.SuiteLabels, ","))},
+ {"RandomSeed", fmt.Sprintf("%d", report.SuiteConfig.RandomSeed)},
+ {"RandomizeAllSpecs", fmt.Sprintf("%t", report.SuiteConfig.RandomizeAllSpecs)},
+ {"LabelFilter", report.SuiteConfig.LabelFilter},
+ {"FocusStrings", strings.Join(report.SuiteConfig.FocusStrings, ",")},
+ {"SkipStrings", strings.Join(report.SuiteConfig.SkipStrings, ",")},
+ {"FocusFiles", strings.Join(report.SuiteConfig.FocusFiles, ";")},
+ {"SkipFiles", strings.Join(report.SuiteConfig.SkipFiles, ";")},
+ {"FailOnPending", fmt.Sprintf("%t", report.SuiteConfig.FailOnPending)},
+ {"FailOnEmpty", fmt.Sprintf("%t", report.SuiteConfig.FailOnEmpty)},
+ {"FailFast", fmt.Sprintf("%t", report.SuiteConfig.FailFast)},
+ {"FlakeAttempts", fmt.Sprintf("%d", report.SuiteConfig.FlakeAttempts)},
+ {"DryRun", fmt.Sprintf("%t", report.SuiteConfig.DryRun)},
+ {"ParallelTotal", fmt.Sprintf("%d", report.SuiteConfig.ParallelTotal)},
+ {"OutputInterceptorMode", report.SuiteConfig.OutputInterceptorMode},
+ },
+ },
+ }
+ for _, spec := range report.SpecReports {
+ if config.OmitSuiteSetupNodes && spec.LeafNodeType != types.NodeTypeIt {
+ continue
+ }
+ name := fmt.Sprintf("[%s]", spec.LeafNodeType)
+ if config.OmitLeafNodeType {
+ name = ""
+ }
+ if spec.FullText() != "" {
+ name = name + " " + spec.FullText()
+ }
+ labels := spec.Labels()
+ if len(labels) > 0 && !config.OmitSpecLabels {
+ name = name + " [" + strings.Join(labels, ", ") + "]"
+ }
+ owner := ""
+ for _, label := range labels {
+ if matches := ownerRE.FindStringSubmatch(label); len(matches) == 2 {
+ owner = matches[1]
+ }
+ }
+ name = strings.TrimSpace(name)
+
+ test := JUnitTestCase{
+ Name: name,
+ Classname: report.SuiteDescription,
+ Status: spec.State.String(),
+ Time: spec.RunTime.Seconds(),
+ Owner: owner,
+ }
+ if !spec.State.Is(config.OmitTimelinesForSpecState) {
+ test.SystemErr = systemErrForUnstructuredReporters(spec)
+ }
+ if !config.OmitCapturedStdOutErr {
+ test.SystemOut = systemOutForUnstructuredReporters(spec)
+ }
+ suite.Tests += 1
+
+ switch spec.State {
+ case types.SpecStateSkipped:
+ message := "skipped"
+ if spec.Failure.Message != "" {
+ message += " - " + spec.Failure.Message
+ }
+ test.Skipped = &JUnitSkipped{Message: message}
+ suite.Skipped += 1
+ case types.SpecStatePending:
+ test.Skipped = &JUnitSkipped{Message: "pending"}
+ suite.Disabled += 1
+ case types.SpecStateFailed:
+ test.Failure = &JUnitFailure{
+ Message: spec.Failure.Message,
+ Type: "failed",
+ Description: failureDescriptionForUnstructuredReporters(spec),
+ }
+ if config.OmitFailureMessageAttr {
+ test.Failure.Message = ""
+ }
+ suite.Failures += 1
+ case types.SpecStateTimedout:
+ test.Failure = &JUnitFailure{
+ Message: spec.Failure.Message,
+ Type: "timedout",
+ Description: failureDescriptionForUnstructuredReporters(spec),
+ }
+ if config.OmitFailureMessageAttr {
+ test.Failure.Message = ""
+ }
+ suite.Failures += 1
+ case types.SpecStateInterrupted:
+ test.Error = &JUnitError{
+ Message: spec.Failure.Message,
+ Type: "interrupted",
+ Description: failureDescriptionForUnstructuredReporters(spec),
+ }
+ if config.OmitFailureMessageAttr {
+ test.Error.Message = ""
+ }
+ suite.Errors += 1
+ case types.SpecStateAborted:
+ test.Failure = &JUnitFailure{
+ Message: spec.Failure.Message,
+ Type: "aborted",
+ Description: failureDescriptionForUnstructuredReporters(spec),
+ }
+ if config.OmitFailureMessageAttr {
+ test.Failure.Message = ""
+ }
+ suite.Errors += 1
+ case types.SpecStatePanicked:
+ test.Error = &JUnitError{
+ Message: spec.Failure.ForwardedPanic,
+ Type: "panicked",
+ Description: failureDescriptionForUnstructuredReporters(spec),
+ }
+ if config.OmitFailureMessageAttr {
+ test.Error.Message = ""
+ }
+ suite.Errors += 1
+ }
+
+ suite.TestCases = append(suite.TestCases, test)
+ }
+
+ junitReport := JUnitTestSuites{
+ Tests: suite.Tests,
+ Disabled: suite.Disabled + suite.Skipped,
+ Errors: suite.Errors,
+ Failures: suite.Failures,
+ Time: suite.Time,
+ TestSuites: []JUnitTestSuite{suite},
+ }
+
+ if err := os.MkdirAll(path.Dir(dst), 0770); err != nil {
+ return err
+ }
+ f, err := os.Create(dst)
+ if err != nil {
+ return err
+ }
+ f.WriteString(xml.Header)
+ encoder := xml.NewEncoder(f)
+ encoder.Indent(" ", " ")
+ encoder.Encode(junitReport)
+
+ return f.Close()
+}
+
+func MergeAndCleanupJUnitReports(sources []string, dst string) ([]string, error) {
+ messages := []string{}
+ mergedReport := JUnitTestSuites{}
+ for _, source := range sources {
+ report := JUnitTestSuites{}
+ f, err := os.Open(source)
+ if err != nil {
+ messages = append(messages, fmt.Sprintf("Could not open %s:\n%s", source, err.Error()))
+ continue
+ }
+ err = xml.NewDecoder(f).Decode(&report)
+ _ = f.Close()
+ if err != nil {
+ messages = append(messages, fmt.Sprintf("Could not decode %s:\n%s", source, err.Error()))
+ continue
+ }
+ os.Remove(source)
+
+ mergedReport.Tests += report.Tests
+ mergedReport.Disabled += report.Disabled
+ mergedReport.Errors += report.Errors
+ mergedReport.Failures += report.Failures
+ mergedReport.Time += report.Time
+ mergedReport.TestSuites = append(mergedReport.TestSuites, report.TestSuites...)
+ }
+
+ if err := os.MkdirAll(path.Dir(dst), 0770); err != nil {
+ return messages, err
+ }
+ f, err := os.Create(dst)
+ if err != nil {
+ return messages, err
+ }
+ f.WriteString(xml.Header)
+ encoder := xml.NewEncoder(f)
+ encoder.Indent(" ", " ")
+ encoder.Encode(mergedReport)
+
+ return messages, f.Close()
+}
+
+func failureDescriptionForUnstructuredReporters(spec types.SpecReport) string {
+ out := &strings.Builder{}
+ NewDefaultReporter(types.ReporterConfig{NoColor: true, VeryVerbose: true}, out).emitFailure(0, spec.State, spec.Failure, true)
+ if len(spec.AdditionalFailures) > 0 {
+ out.WriteString("\nThere were additional failures detected after the initial failure. These are visible in the timeline\n")
+ }
+ return out.String()
+}
+
+func systemErrForUnstructuredReporters(spec types.SpecReport) string {
+ return RenderTimeline(spec, true)
+}
+
+func RenderTimeline(spec types.SpecReport, noColor bool) string {
+ out := &strings.Builder{}
+ NewDefaultReporter(types.ReporterConfig{NoColor: noColor, VeryVerbose: true}, out).emitTimeline(0, spec, spec.Timeline())
+ return out.String()
+}
+
+func systemOutForUnstructuredReporters(spec types.SpecReport) string {
+ return spec.CapturedStdOutErr
+}
+
+// Deprecated JUnitReporter (so folks can still compile their suites)
+type JUnitReporter struct{}
+
+func NewJUnitReporter(_ string) *JUnitReporter { return &JUnitReporter{} }
+func (reporter *JUnitReporter) SuiteWillBegin(_ config.GinkgoConfigType, _ *types.SuiteSummary) {}
+func (reporter *JUnitReporter) BeforeSuiteDidRun(_ *types.SetupSummary) {}
+func (reporter *JUnitReporter) SpecWillRun(_ *types.SpecSummary) {}
+func (reporter *JUnitReporter) SpecDidComplete(_ *types.SpecSummary) {}
+func (reporter *JUnitReporter) AfterSuiteDidRun(_ *types.SetupSummary) {}
+func (reporter *JUnitReporter) SuiteDidEnd(_ *types.SuiteSummary) {}
diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go b/vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go
new file mode 100644
index 000000000..5e726c464
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go
@@ -0,0 +1,29 @@
+package reporters
+
+import (
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type Reporter interface {
+ SuiteWillBegin(report types.Report)
+ WillRun(report types.SpecReport)
+ DidRun(report types.SpecReport)
+ SuiteDidEnd(report types.Report)
+
+ //Timeline emission
+ EmitFailure(state types.SpecState, failure types.Failure)
+ EmitProgressReport(progressReport types.ProgressReport)
+ EmitReportEntry(entry types.ReportEntry)
+ EmitSpecEvent(event types.SpecEvent)
+}
+
+type NoopReporter struct{}
+
+func (n NoopReporter) SuiteWillBegin(report types.Report) {}
+func (n NoopReporter) WillRun(report types.SpecReport) {}
+func (n NoopReporter) DidRun(report types.SpecReport) {}
+func (n NoopReporter) SuiteDidEnd(report types.Report) {}
+func (n NoopReporter) EmitFailure(state types.SpecState, failure types.Failure) {}
+func (n NoopReporter) EmitProgressReport(progressReport types.ProgressReport) {}
+func (n NoopReporter) EmitReportEntry(entry types.ReportEntry) {}
+func (n NoopReporter) EmitSpecEvent(event types.SpecEvent) {}
diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go
new file mode 100644
index 000000000..e990ad82e
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go
@@ -0,0 +1,105 @@
+/*
+
+TeamCity Reporter for Ginkgo
+
+Makes use of TeamCity's support for Service Messages
+http://confluence.jetbrains.com/display/TCD7/Build+Script+Interaction+with+TeamCity#BuildScriptInteractionwithTeamCity-ReportingTests
+*/
+
+package reporters
+
+import (
+ "fmt"
+ "os"
+ "path"
+ "strings"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+func tcEscape(s string) string {
+ s = strings.ReplaceAll(s, "|", "||")
+ s = strings.ReplaceAll(s, "'", "|'")
+ s = strings.ReplaceAll(s, "\n", "|n")
+ s = strings.ReplaceAll(s, "\r", "|r")
+ s = strings.ReplaceAll(s, "[", "|[")
+ s = strings.ReplaceAll(s, "]", "|]")
+ return s
+}
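+
+// Editor's note, not upstream Ginkgo: '|' is TeamCity's escape character, so
+// for example:
+//
+//	tcEscape("spec [fast]\nline 2") // => "spec |[fast|]|nline 2"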
+
+func GenerateTeamcityReport(report types.Report, dst string) error {
+ if err := os.MkdirAll(path.Dir(dst), 0770); err != nil {
+ return err
+ }
+ f, err := os.Create(dst)
+ if err != nil {
+ return err
+ }
+
+ name := report.SuiteDescription
+ labels := report.SuiteLabels
+ if len(labels) > 0 {
+ name = name + " [" + strings.Join(labels, ", ") + "]"
+ }
+ fmt.Fprintf(f, "##teamcity[testSuiteStarted name='%s']\n", tcEscape(name))
+ for _, spec := range report.SpecReports {
+ name := fmt.Sprintf("[%s]", spec.LeafNodeType)
+ if spec.FullText() != "" {
+ name = name + " " + spec.FullText()
+ }
+ labels := spec.Labels()
+ if len(labels) > 0 {
+ name = name + " [" + strings.Join(labels, ", ") + "]"
+ }
+
+ name = tcEscape(name)
+ fmt.Fprintf(f, "##teamcity[testStarted name='%s']\n", name)
+ switch spec.State {
+ case types.SpecStatePending:
+ fmt.Fprintf(f, "##teamcity[testIgnored name='%s' message='pending']\n", name)
+ case types.SpecStateSkipped:
+ message := "skipped"
+ if spec.Failure.Message != "" {
+ message += " - " + spec.Failure.Message
+ }
+ fmt.Fprintf(f, "##teamcity[testIgnored name='%s' message='%s']\n", name, tcEscape(message))
+ case types.SpecStateFailed:
+ details := failureDescriptionForUnstructuredReporters(spec)
+ fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='failed - %s' details='%s']\n", name, tcEscape(spec.Failure.Message), tcEscape(details))
+ case types.SpecStatePanicked:
+ details := failureDescriptionForUnstructuredReporters(spec)
+ fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='panicked - %s' details='%s']\n", name, tcEscape(spec.Failure.ForwardedPanic), tcEscape(details))
+ case types.SpecStateTimedout:
+ details := failureDescriptionForUnstructuredReporters(spec)
+ fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='timedout - %s' details='%s']\n", name, tcEscape(spec.Failure.Message), tcEscape(details))
+ case types.SpecStateInterrupted:
+ details := failureDescriptionForUnstructuredReporters(spec)
+ fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='interrupted - %s' details='%s']\n", name, tcEscape(spec.Failure.Message), tcEscape(details))
+ case types.SpecStateAborted:
+ details := failureDescriptionForUnstructuredReporters(spec)
+ fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='aborted - %s' details='%s']\n", name, tcEscape(spec.Failure.Message), tcEscape(details))
+ }
+
+ fmt.Fprintf(f, "##teamcity[testStdOut name='%s' out='%s']\n", name, tcEscape(systemOutForUnstructuredReporters(spec)))
+ fmt.Fprintf(f, "##teamcity[testStdErr name='%s' out='%s']\n", name, tcEscape(systemErrForUnstructuredReporters(spec)))
+ fmt.Fprintf(f, "##teamcity[testFinished name='%s' duration='%d']\n", name, int(spec.RunTime.Seconds()*1000.0))
+ }
+ fmt.Fprintf(f, "##teamcity[testSuiteFinished name='%s']\n", tcEscape(report.SuiteDescription))
+
+ return f.Close()
+}
+
+func MergeAndCleanupTeamcityReports(sources []string, dst string) ([]string, error) {
+ messages := []string{}
+ merged := []byte{}
+ for _, source := range sources {
+ data, err := os.ReadFile(source)
+ if err != nil {
+ messages = append(messages, fmt.Sprintf("Could not open %s:\n%s", source, err.Error()))
+ continue
+ }
+ os.Remove(source)
+ merged = append(merged, data...)
+ }
+ return messages, os.WriteFile(dst, merged, 0666)
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go b/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go
new file mode 100644
index 000000000..aa1a35176
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go
@@ -0,0 +1,221 @@
+package ginkgo
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/onsi/ginkgo/v2/internal"
+ "github.com/onsi/ginkgo/v2/internal/global"
+ "github.com/onsi/ginkgo/v2/reporters"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+/*
+Report represents the report for a Suite.
+It is documented here: https://pkg.go.dev/github.com/onsi/ginkgo/v2/types#Report
+*/
+type Report = types.Report
+
+/*
+SpecReport represents the report for a Spec.
+It is documented here: https://pkg.go.dev/github.com/onsi/ginkgo/v2/types#SpecReport
+*/
+type SpecReport = types.SpecReport
+
+/*
+CurrentSpecReport returns information about the current running spec.
+The returned object is a types.SpecReport which includes helper methods
+to make extracting information about the spec easier.
+
+You can learn more about SpecReport here: https://pkg.go.dev/github.com/onsi/ginkgo/types#SpecReport
+You can learn more about CurrentSpecReport() here: https://onsi.github.io/ginkgo/#getting-a-report-for-the-current-spec
+*/
+func CurrentSpecReport() SpecReport {
+ return global.Suite.CurrentSpecReport()
+}
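+
+// Editor's note - an illustrative sketch, not part of upstream Ginkgo: a
+// common pattern is to inspect the current report in an AfterEach and react
+// to failures (dumpDebugLogs is a hypothetical helper):
+//
+//	AfterEach(func() {
+//		if CurrentSpecReport().Failed() {
+//			dumpDebugLogs() // hypothetical helper
+//		}
+//	})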
+
+/*
+ ReportEntryVisibility governs the visibility of ReportEntries in Ginkgo's console reporter
+
+- ReportEntryVisibilityAlways: the default behavior - the ReportEntry is always emitted.
+- ReportEntryVisibilityFailureOrVerbose: the ReportEntry is only emitted if the spec fails or if the tests are run with -v (similar to GinkgoWriter's behavior).
+- ReportEntryVisibilityNever: the ReportEntry is never emitted though it appears in any generated machine-readable reports (e.g. by setting `--json-report`).
+
+You can learn more about Report Entries here: https://onsi.github.io/ginkgo/#attaching-data-to-reports
+*/
+type ReportEntryVisibility = types.ReportEntryVisibility
+
+const ReportEntryVisibilityAlways, ReportEntryVisibilityFailureOrVerbose, ReportEntryVisibilityNever = types.ReportEntryVisibilityAlways, types.ReportEntryVisibilityFailureOrVerbose, types.ReportEntryVisibilityNever
+
+/*
+AddReportEntry generates and adds a new ReportEntry to the current spec's SpecReport.
+It can take any of the following arguments:
+ - A single arbitrary object to attach as the Value of the ReportEntry. This object will be included in any generated reports and will be emitted to the console when the report is emitted.
+ - A ReportEntryVisibility enum to control the visibility of the ReportEntry
+ - An Offset or CodeLocation decoration to control the reported location of the ReportEntry
+
+If the Value object implements `fmt.Stringer`, its `String()` representation is used when emitting to the console.
+
+AddReportEntry() must be called within a Subject or Setup node - not in a Container node.
+
+You can learn more about Report Entries here: https://onsi.github.io/ginkgo/#attaching-data-to-reports
+*/
+func AddReportEntry(name string, args ...interface{}) {
+ cl := types.NewCodeLocation(1)
+ reportEntry, err := internal.NewReportEntry(name, cl, args...)
+ if err != nil {
+ Fail(fmt.Sprintf("Failed to generate Report Entry:\n%s", err.Error()), 1)
+ }
+ err = global.Suite.AddReportEntry(reportEntry)
+ if err != nil {
+ Fail(fmt.Sprintf("Failed to add Report Entry:\n%s", err.Error()), 1)
+ }
+}
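+
+// Editor's note - an illustrative sketch, not part of upstream Ginkgo:
+// attaching a timing value that only surfaces for failing or verbose runs
+// (the entry name is an assumption):
+//
+//	It("measures something", func() {
+//		start := time.Now()
+//		// ... exercise the system under test ...
+//		AddReportEntry("duration", ReportEntryVisibilityFailureOrVerbose, time.Since(start))
+//	})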
+
+/*
+ReportBeforeEach nodes are run for each spec, even if the spec is skipped or pending. ReportBeforeEach nodes take a function that
+receives either a SpecReport or both a SpecContext and a SpecReport for interruptible behavior. They are called before the spec starts.
+
+Example:
+
+ ReportBeforeEach(func(report SpecReport) { /* process report */ })
+ ReportBeforeEach(func(ctx SpecContext, report SpecReport) {
+ // process report
+ }, NodeTimeout(1 * time.Minute))
+
+You cannot nest any other Ginkgo nodes within a ReportBeforeEach node's closure.
+You can learn more about ReportBeforeEach here: https://onsi.github.io/ginkgo/#generating-reports-programmatically
+
+You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes
+*/
+func ReportBeforeEach(body any, args ...any) bool {
+ combinedArgs := []interface{}{body}
+ combinedArgs = append(combinedArgs, args...)
+
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportBeforeEach, "", combinedArgs...))
+}
+
+/*
+ReportAfterEach nodes are run for each spec, even if the spec is skipped or pending.
+ReportAfterEach nodes take a function that receives either a SpecReport or both a SpecContext and a SpecReport for interruptible behavior.
+They are called after the spec has completed and receive the final report for the spec.
+
+Example:
+
+ ReportAfterEach(func(report SpecReport) { /* process report */ })
+ ReportAfterEach(func(ctx SpecContext, report SpecReport) {
+ // process report
+ }, NodeTimeout(1 * time.Minute))
+
+You cannot nest any other Ginkgo nodes within a ReportAfterEach node's closure.
+You can learn more about ReportAfterEach here: https://onsi.github.io/ginkgo/#generating-reports-programmatically
+
+You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes
+*/
+func ReportAfterEach(body any, args ...any) bool {
+ combinedArgs := []interface{}{body}
+ combinedArgs = append(combinedArgs, args...)
+
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportAfterEach, "", combinedArgs...))
+}
+
+/*
+ReportBeforeSuite nodes are run at the beginning of the suite. ReportBeforeSuite nodes take a function
+that can receive either a Report or both a SpecContext and a Report for interruptible behavior.
+
+Example Usage:
+
+ ReportBeforeSuite(func(r Report) { /* process report */ })
+ ReportBeforeSuite(func(ctx SpecContext, r Report) {
+ // process report
+ }, NodeTimeout(1 * time.Minute))
+
+They are called at the beginning of the suite, before any specs have run and any BeforeSuite or SynchronizedBeforeSuite nodes, and are passed in the initial report for the suite.
+ReportBeforeSuite nodes must be created at the top-level (i.e. not nested in a Context/Describe/When node)
+
+When running in parallel, Ginkgo ensures that only one of the parallel nodes runs the ReportBeforeSuite.
+
+You cannot nest any other Ginkgo nodes within a ReportBeforeSuite node's closure.
+You can learn more about ReportBeforeSuite here: https://onsi.github.io/ginkgo/#generating-reports-programmatically
+
+You can learn more about Ginkgo's reporting infrastructure, including generating reports with the CLI here: https://onsi.github.io/ginkgo/#generating-machine-readable-reports
+
+You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes
+*/
+func ReportBeforeSuite(body any, args ...any) bool {
+ combinedArgs := []interface{}{body}
+ combinedArgs = append(combinedArgs, args...)
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportBeforeSuite, "", combinedArgs...))
+}
+
+/*
+ReportAfterSuite nodes are run at the end of the suite. ReportAfterSuite nodes take a function
+that can receive either a Report or both a SpecContext and a Report for interruptible behavior.
+
+Example Usage:
+
+ ReportAfterSuite("Non-interruptible ReportAfterSuite", func(r Report) { // process report })
+ ReportAfterSuite("Interruptible ReportAfterSuite", func(ctx SpecContext, r Report) {
+ // process report
+ }, NodeTimeout(1 * time.Minute))
+
+They are called at the end of the suite, after all specs have run and any AfterSuite or SynchronizedAfterSuite nodes, and are passed in the final report for the suite.
+ReportAfterSuite nodes must be created at the top-level (i.e. not nested in a Context/Describe/When node)
+
+When running in parallel, Ginkgo ensures that only one of the parallel nodes runs the ReportAfterSuite and that it is passed a report that is aggregated across
+all parallel nodes
+
+In addition to using ReportAfterSuite to programmatically generate suite reports, you can also generate JSON, JUnit, and Teamcity formatted reports using the --json-report, --junit-report, and --teamcity-report ginkgo CLI flags.
+
+You cannot nest any other Ginkgo nodes within a ReportAfterSuite node's closure.
+You can learn more about ReportAfterSuite here: https://onsi.github.io/ginkgo/#generating-reports-programmatically
+
+You can learn more about Ginkgo's reporting infrastructure, including generating reports with the CLI here: https://onsi.github.io/ginkgo/#generating-machine-readable-reports
+
+You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes
+*/
+func ReportAfterSuite(text string, body any, args ...interface{}) bool {
+ combinedArgs := []interface{}{body}
+ combinedArgs = append(combinedArgs, args...)
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportAfterSuite, text, combinedArgs...))
+}
+
+func registerReportAfterSuiteNodeForAutogeneratedReports(reporterConfig types.ReporterConfig) {
+ body := func(report Report) {
+ if reporterConfig.JSONReport != "" {
+ err := reporters.GenerateJSONReport(report, reporterConfig.JSONReport)
+ if err != nil {
+ Fail(fmt.Sprintf("Failed to generate JSON report:\n%s", err.Error()))
+ }
+ }
+ if reporterConfig.JUnitReport != "" {
+ err := reporters.GenerateJUnitReport(report, reporterConfig.JUnitReport)
+ if err != nil {
+ Fail(fmt.Sprintf("Failed to generate JUnit report:\n%s", err.Error()))
+ }
+ }
+ if reporterConfig.TeamcityReport != "" {
+ err := reporters.GenerateTeamcityReport(report, reporterConfig.TeamcityReport)
+ if err != nil {
+ Fail(fmt.Sprintf("Failed to generate Teamcity report:\n%s", err.Error()))
+ }
+ }
+ }
+
+ flags := []string{}
+ if reporterConfig.JSONReport != "" {
+ flags = append(flags, "--json-report")
+ }
+ if reporterConfig.JUnitReport != "" {
+ flags = append(flags, "--junit-report")
+ }
+ if reporterConfig.TeamcityReport != "" {
+ flags = append(flags, "--teamcity-report")
+ }
+ pushNode(internal.NewNode(
+ deprecationTracker, types.NodeTypeReportAfterSuite,
+ fmt.Sprintf("Autogenerated ReportAfterSuite for %s", strings.Join(flags, " ")),
+ body,
+ types.NewCustomCodeLocation("autogenerated by Ginkgo"),
+ ))
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/table_dsl.go b/vendor/github.com/onsi/ginkgo/v2/table_dsl.go
new file mode 100644
index 000000000..c7de7a8be
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/table_dsl.go
@@ -0,0 +1,386 @@
+package ginkgo
+
+import (
+ "context"
+ "fmt"
+ "reflect"
+ "strings"
+
+ "github.com/onsi/ginkgo/v2/internal"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+/*
+The EntryDescription decorator allows you to pass a format string to DescribeTable() and Entry(). This format string is used to generate entry names via:
+
+ fmt.Sprintf(formatString, parameters...)
+
+where parameters are the parameters passed into the entry.
+
+When passed into an Entry, the EntryDescription is used to generate the name of that entry. When passed to DescribeTable, the EntryDescription is used to generate the names for any entries that have `nil` descriptions.
+
+You can learn more about generating EntryDescriptions here: https://onsi.github.io/ginkgo/#generating-entry-descriptions
+*/
+type EntryDescription string
+
+func (ed EntryDescription) render(args ...interface{}) string {
+ return fmt.Sprintf(string(ed), args...)
+}
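+
+// Editor's note - an illustrative sketch, not part of upstream Ginkgo: an
+// EntryDescription acts as an fmt format string for entries with nil
+// descriptions:
+//
+//	DescribeTable("addition",
+//		func(a, b, sum int) { Expect(a + b).To(Equal(sum)) },
+//		EntryDescription("%d + %d = %d"),
+//		Entry(nil, 1, 2, 3), // this entry is named "1 + 2 = 3"
+//	)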
+
+/*
+DescribeTable describes a table-driven spec.
+
+For example:
+
+ DescribeTable("a simple table",
+ func(x int, y int, expected bool) {
+ Ω(x > y).Should(Equal(expected))
+ },
+ Entry("x > y", 1, 0, true),
+ Entry("x == y", 0, 0, false),
+ Entry("x < y", 0, 1, false),
+ )
+
+You can learn more about DescribeTable here: https://onsi.github.io/ginkgo/#table-specs
+And can explore some Table patterns here: https://onsi.github.io/ginkgo/#table-specs-patterns
+*/
+func DescribeTable(description string, args ...interface{}) bool {
+ GinkgoHelper()
+ generateTable(description, false, args...)
+ return true
+}
+
+/*
+You can focus a table with `FDescribeTable`. This is equivalent to `FDescribe`.
+*/
+func FDescribeTable(description string, args ...interface{}) bool {
+ GinkgoHelper()
+ args = append(args, internal.Focus)
+ generateTable(description, false, args...)
+ return true
+}
+
+/*
+You can mark a table as pending with `PDescribeTable`. This is equivalent to `PDescribe`.
+*/
+func PDescribeTable(description string, args ...interface{}) bool {
+ GinkgoHelper()
+ args = append(args, internal.Pending)
+ generateTable(description, false, args...)
+ return true
+}
+
+/*
+You can mark a table as pending with `XDescribeTable`. This is equivalent to `XDescribe`.
+*/
+var XDescribeTable = PDescribeTable
+
+/*
+DescribeTableSubtree describes a table-driven spec that generates a set of tests for each entry.
+
+For example:
+
+ DescribeTableSubtree("a subtree table",
+ func(url string, code int, message string) {
+ var resp *http.Response
+ BeforeEach(func() {
+ var err error
+ resp, err = http.Get(url)
+ Expect(err).NotTo(HaveOccurred())
+ DeferCleanup(resp.Body.Close)
+ })
+
+ It("should return the expected status code", func() {
+ Expect(resp.StatusCode).To(Equal(code))
+ })
+
+ It("should return the expected message", func() {
+ body, err := ioutil.ReadAll(resp.Body)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(string(body)).To(Equal(message))
+ })
+ },
+ Entry("default response", "example.com/response", http.StatusOK, "hello world"),
+ Entry("missing response", "example.com/missing", http.StatusNotFound, "wat?"),
+ )
+
+Note that you **must** define at least one It inside the body function.
+
+You can learn more about DescribeTableSubtree here: https://onsi.github.io/ginkgo/#table-specs
+And can explore some Table patterns here: https://onsi.github.io/ginkgo/#table-specs-patterns
+*/
+func DescribeTableSubtree(description string, args ...interface{}) bool {
+ GinkgoHelper()
+ generateTable(description, true, args...)
+ return true
+}
+
+/*
+You can focus a table with `FDescribeTableSubtree`. This is equivalent to `FDescribe`.
+*/
+func FDescribeTableSubtree(description string, args ...interface{}) bool {
+ GinkgoHelper()
+ args = append(args, internal.Focus)
+ generateTable(description, true, args...)
+ return true
+}
+
+/*
+You can mark a table as pending with `PDescribeTableSubtree`. This is equivalent to `PDescribe`.
+*/
+func PDescribeTableSubtree(description string, args ...interface{}) bool {
+ GinkgoHelper()
+ args = append(args, internal.Pending)
+ generateTable(description, true, args...)
+ return true
+}
+
+/*
+You can mark a table as pending with `XDescribeTableSubtree`. This is equivalent to `XDescribe`.
+*/
+var XDescribeTableSubtree = PDescribeTableSubtree
+
+/*
+TableEntry represents an entry in a table test. You generally use the `Entry` constructor.
+*/
+type TableEntry struct {
+ description interface{}
+ decorations []interface{}
+ parameters []interface{}
+ codeLocation types.CodeLocation
+}
+
+/*
+Entry constructs a TableEntry.
+
+The first argument is a description. This can be a string, a function that accepts the parameters passed to the TableEntry and returns a string, an EntryDescription format string, or nil. If nil is provided then the name of the Entry is derived using the table-level entry description.
+Subsequent arguments accept any Ginkgo decorators. These are filtered out and the remaining arguments are passed into the Spec function associated with the table.
+
+Each Entry ends up generating an individual Ginkgo It. The body of the It is the Table Body function with the Entry parameters passed in.
+
+If you want to generate interruptible specs simply write a Table function that accepts a SpecContext as its first argument. You can then decorate individual Entrys with the NodeTimeout and SpecTimeout decorators.
+
+You can learn more about Entry here: https://onsi.github.io/ginkgo/#table-specs
+*/
+func Entry(description interface{}, args ...interface{}) TableEntry {
+ GinkgoHelper()
+ decorations, parameters := internal.PartitionDecorations(args...)
+ return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(0)}
+}
+
+/*
+You can focus a particular entry with FEntry. This is equivalent to FIt.
+*/
+func FEntry(description interface{}, args ...interface{}) TableEntry {
+ GinkgoHelper()
+ decorations, parameters := internal.PartitionDecorations(args...)
+ decorations = append(decorations, internal.Focus)
+ return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(0)}
+}
+
+/*
+You can mark a particular entry as pending with PEntry. This is equivalent to PIt.
+*/
+func PEntry(description interface{}, args ...interface{}) TableEntry {
+ GinkgoHelper()
+ decorations, parameters := internal.PartitionDecorations(args...)
+ decorations = append(decorations, internal.Pending)
+ return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(0)}
+}
+
+/*
+You can mark a particular entry as pending with XEntry. This is equivalent to XIt.
+*/
+var XEntry = PEntry
+
+var contextType = reflect.TypeOf(new(context.Context)).Elem()
+var specContextType = reflect.TypeOf(new(SpecContext)).Elem()
+
+func generateTable(description string, isSubtree bool, args ...interface{}) {
+ GinkgoHelper()
+ cl := types.NewCodeLocation(0)
+ containerNodeArgs := []interface{}{cl}
+
+ entries := []TableEntry{}
+ var internalBody interface{}
+ var internalBodyType reflect.Type
+
+ var tableLevelEntryDescription interface{}
+ tableLevelEntryDescription = func(args ...interface{}) string {
+ out := []string{}
+ for _, arg := range args {
+ out = append(out, fmt.Sprint(arg))
+ }
+ return "Entry: " + strings.Join(out, ", ")
+ }
+
+ if len(args) == 1 {
+ exitIfErr(types.GinkgoErrors.MissingParametersForTableFunction(cl))
+ }
+
+ for i, arg := range args {
+ switch t := reflect.TypeOf(arg); {
+ case t == nil:
+ exitIfErr(types.GinkgoErrors.IncorrectParameterTypeForTable(i, "nil", cl))
+ case t == reflect.TypeOf(TableEntry{}):
+ entries = append(entries, arg.(TableEntry))
+ case t == reflect.TypeOf([]TableEntry{}):
+ entries = append(entries, arg.([]TableEntry)...)
+ case t == reflect.TypeOf(EntryDescription("")):
+ tableLevelEntryDescription = arg.(EntryDescription).render
+ case t.Kind() == reflect.Func && t.NumOut() == 1 && t.Out(0) == reflect.TypeOf(""):
+ tableLevelEntryDescription = arg
+ case t.Kind() == reflect.Func:
+ if internalBody != nil {
+ exitIfErr(types.GinkgoErrors.MultipleEntryBodyFunctionsForTable(cl))
+ }
+ internalBody = arg
+ internalBodyType = reflect.TypeOf(internalBody)
+ default:
+ containerNodeArgs = append(containerNodeArgs, arg)
+ }
+ }
+
+ containerNodeArgs = append(containerNodeArgs, func() {
+ for _, entry := range entries {
+ var err error
+ entry := entry
+ var description string
+ switch t := reflect.TypeOf(entry.description); {
+ case t == nil:
+ err = validateParameters(tableLevelEntryDescription, entry.parameters, "Entry Description function", entry.codeLocation, false)
+ if err == nil {
+ description = invokeFunction(tableLevelEntryDescription, entry.parameters)[0].String()
+ }
+ case t == reflect.TypeOf(EntryDescription("")):
+ description = entry.description.(EntryDescription).render(entry.parameters...)
+ case t == reflect.TypeOf(""):
+ description = entry.description.(string)
+ case t.Kind() == reflect.Func && t.NumOut() == 1 && t.Out(0) == reflect.TypeOf(""):
+ err = validateParameters(entry.description, entry.parameters, "Entry Description function", entry.codeLocation, false)
+ if err == nil {
+ description = invokeFunction(entry.description, entry.parameters)[0].String()
+ }
+ default:
+ err = types.GinkgoErrors.InvalidEntryDescription(entry.codeLocation)
+ }
+
+ internalNodeArgs := []interface{}{entry.codeLocation}
+ internalNodeArgs = append(internalNodeArgs, entry.decorations...)
+
+ hasContext := false
+ if internalBodyType.NumIn() > 0 {
+ if internalBodyType.In(0).Implements(specContextType) {
+ hasContext = true
+ } else if internalBodyType.In(0).Implements(contextType) {
+ hasContext = true
+ if len(entry.parameters) > 0 && reflect.TypeOf(entry.parameters[0]) != nil && reflect.TypeOf(entry.parameters[0]).Implements(contextType) {
+ // we allow you to pass in a non-nil context
+ hasContext = false
+ }
+ }
+ }
+
+ if err == nil {
+ err = validateParameters(internalBody, entry.parameters, "Table Body function", entry.codeLocation, hasContext)
+ }
+
+ if hasContext {
+ internalNodeArgs = append(internalNodeArgs, func(c SpecContext) {
+ if err != nil {
+ panic(err)
+ }
+ invokeFunction(internalBody, append([]interface{}{c}, entry.parameters...))
+ })
+ if isSubtree {
+ exitIfErr(types.GinkgoErrors.ContextsCannotBeUsedInSubtreeTables(cl))
+ }
+ } else {
+ internalNodeArgs = append(internalNodeArgs, func() {
+ if err != nil {
+ panic(err)
+ }
+ invokeFunction(internalBody, entry.parameters)
+ })
+ }
+
+ internalNodeType := types.NodeTypeIt
+ if isSubtree {
+ internalNodeType = types.NodeTypeContainer
+ }
+
+ pushNode(internal.NewNode(deprecationTracker, internalNodeType, description, internalNodeArgs...))
+ }
+ })
+
+ pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, description, containerNodeArgs...))
+}
+
+func invokeFunction(function interface{}, parameters []interface{}) []reflect.Value {
+ inValues := make([]reflect.Value, len(parameters))
+
+ funcType := reflect.TypeOf(function)
+ limit := funcType.NumIn()
+ if funcType.IsVariadic() {
+ limit = limit - 1
+ }
+
+ for i := 0; i < limit && i < len(parameters); i++ {
+ inValues[i] = computeValue(parameters[i], funcType.In(i))
+ }
+
+ if funcType.IsVariadic() {
+ variadicType := funcType.In(limit).Elem()
+ for i := limit; i < len(parameters); i++ {
+ inValues[i] = computeValue(parameters[i], variadicType)
+ }
+ }
+
+ return reflect.ValueOf(function).Call(inValues)
+}
+
+func validateParameters(function interface{}, parameters []interface{}, kind string, cl types.CodeLocation, hasContext bool) error {
+ funcType := reflect.TypeOf(function)
+ limit := funcType.NumIn()
+ offset := 0
+ if hasContext {
+ limit = limit - 1
+ offset = 1
+ }
+ if funcType.IsVariadic() {
+ limit = limit - 1
+ }
+ if len(parameters) < limit {
+ return types.GinkgoErrors.TooFewParametersToTableFunction(limit, len(parameters), kind, cl)
+ }
+ if len(parameters) > limit && !funcType.IsVariadic() {
+ return types.GinkgoErrors.TooManyParametersToTableFunction(limit, len(parameters), kind, cl)
+ }
+ var i = 0
+ for ; i < limit; i++ {
+ actual := reflect.TypeOf(parameters[i])
+ expected := funcType.In(i + offset)
+ if !(actual == nil) && !actual.AssignableTo(expected) {
+ return types.GinkgoErrors.IncorrectParameterTypeToTableFunction(i+1, expected, actual, kind, cl)
+ }
+ }
+ if funcType.IsVariadic() {
+ expected := funcType.In(limit + offset).Elem()
+ for ; i < len(parameters); i++ {
+ actual := reflect.TypeOf(parameters[i])
+ if !(actual == nil) && !actual.AssignableTo(expected) {
+ return types.GinkgoErrors.IncorrectVariadicParameterTypeToTableFunction(expected, actual, kind, cl)
+ }
+ }
+ }
+
+ return nil
+}
+
+func computeValue(parameter interface{}, t reflect.Type) reflect.Value {
+ if parameter == nil {
+ return reflect.Zero(t)
+ } else {
+ return reflect.ValueOf(parameter)
+ }
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/code_location.go b/vendor/github.com/onsi/ginkgo/v2/types/code_location.go
new file mode 100644
index 000000000..57e87517e
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/types/code_location.go
@@ -0,0 +1,159 @@
+package types
+
+import (
+ "fmt"
+ "os"
+ "regexp"
+ "runtime"
+ "runtime/debug"
+ "strings"
+ "sync"
+)
+
+type CodeLocation struct {
+ FileName string `json:",omitempty"`
+ LineNumber int `json:",omitempty"`
+ FullStackTrace string `json:",omitempty"`
+ CustomMessage string `json:",omitempty"`
+}
+
+func (codeLocation CodeLocation) String() string {
+ if codeLocation.CustomMessage != "" {
+ return codeLocation.CustomMessage
+ }
+ return fmt.Sprintf("%s:%d", codeLocation.FileName, codeLocation.LineNumber)
+}
+
+func (codeLocation CodeLocation) ContentsOfLine() string {
+ if codeLocation.CustomMessage != "" {
+ return ""
+ }
+ contents, err := os.ReadFile(codeLocation.FileName)
+ if err != nil {
+ return ""
+ }
+ lines := strings.Split(string(contents), "\n")
+ if len(lines) < codeLocation.LineNumber {
+ return ""
+ }
+ return lines[codeLocation.LineNumber-1]
+}
+
+type codeLocationLocator struct {
+ pcs map[uintptr]bool
+ helpers map[string]bool
+ lock *sync.Mutex
+}
+
+func (c *codeLocationLocator) addHelper(pc uintptr) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ if c.pcs[pc] {
+ return
+ }
+ // temporarily release the lock while resolving the function for this pc;
+ // the deferred Unlock pairs with the Lock re-acquired below
+ c.lock.Unlock()
+ f := runtime.FuncForPC(pc)
+ c.lock.Lock()
+ if f == nil {
+ return
+ }
+ c.helpers[f.Name()] = true
+ c.pcs[pc] = true
+}
+
+func (c *codeLocationLocator) hasHelper(name string) bool {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ return c.helpers[name]
+}
+
+func (c *codeLocationLocator) getCodeLocation(skip int) CodeLocation {
+ pc := make([]uintptr, 40)
+ n := runtime.Callers(skip+2, pc)
+ if n == 0 {
+ return CodeLocation{}
+ }
+ pc = pc[:n]
+ frames := runtime.CallersFrames(pc)
+ for {
+ frame, more := frames.Next()
+ if !c.hasHelper(frame.Function) {
+ return CodeLocation{FileName: frame.File, LineNumber: frame.Line}
+ }
+ if !more {
+ break
+ }
+ }
+ return CodeLocation{}
+}
+
+var clLocator = &codeLocationLocator{
+ pcs: map[uintptr]bool{},
+ helpers: map[string]bool{},
+ lock: &sync.Mutex{},
+}
+
+// MarkAsHelper is used by GinkgoHelper to mark the caller (appropriately offset by skip) as a helper. You can use this directly if you need to provide an optional `skip` to mark functions further up the call stack as helpers.
+func MarkAsHelper(optionalSkip ...int) {
+ skip := 1
+ if len(optionalSkip) > 0 {
+ skip += optionalSkip[0]
+ }
+ pc, _, _, ok := runtime.Caller(skip)
+ if ok {
+ clLocator.addHelper(pc)
+ }
+}
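+
+// Editor's note - an illustrative sketch, not part of upstream Ginkgo: a
+// custom assertion helper can exclude its own frames from reported code
+// locations (ExpectNoError is a hypothetical helper):
+//
+//	func ExpectNoError(err error) {
+//		types.MarkAsHelper() // failures now point at this function's caller
+//		if err != nil {
+//			ginkgo.Fail(err.Error())
+//		}
+//	}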
+
+func NewCustomCodeLocation(message string) CodeLocation {
+ return CodeLocation{
+ CustomMessage: message,
+ }
+}
+
+func NewCodeLocation(skip int) CodeLocation {
+ return clLocator.getCodeLocation(skip + 1)
+}
+
+func NewCodeLocationWithStackTrace(skip int) CodeLocation {
+ cl := clLocator.getCodeLocation(skip + 1)
+ cl.FullStackTrace = PruneStack(string(debug.Stack()), skip+1)
+ return cl
+}
+
+// PruneStack removes references to functions that are internal to Ginkgo
+// and the Go runtime from a stack string and a certain number of stack entries
+// at the beginning of the stack. The stack string has the format
+// as returned by runtime/debug.Stack. The leading goroutine information is
+// optional and always removed if present. Beware that runtime/debug.Stack
+// adds itself as first entry, so typically skip must be >= 1 to remove that
+// entry.
+func PruneStack(fullStackTrace string, skip int) string {
+ stack := strings.Split(fullStackTrace, "\n")
+ // Ensure that the even entries are the method names and the
+ // odd entries the source code information.
+ if len(stack) > 0 && strings.HasPrefix(stack[0], "goroutine ") {
+ // Ignore "goroutine 29 [running]:" line.
+ stack = stack[1:]
+ }
+ // The "+1" is for skipping over the initial entry, which is
+ // runtime/debug.Stack() itself.
+ if len(stack) > 2*(skip+1) {
+ stack = stack[2*(skip+1):]
+ }
+ prunedStack := []string{}
+ if os.Getenv("GINKGO_PRUNE_STACK") == "FALSE" {
+ prunedStack = stack
+ } else {
+ re := regexp.MustCompile(`\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`)
+ for i := 0; i < len(stack)/2; i++ {
+ // We filter out based on the source code file name.
+ if !re.MatchString(stack[i*2+1]) {
+ prunedStack = append(prunedStack, stack[i*2])
+ prunedStack = append(prunedStack, stack[i*2+1])
+ }
+ }
+ }
+ return strings.Join(prunedStack, "\n")
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/config.go b/vendor/github.com/onsi/ginkgo/v2/types/config.go
new file mode 100644
index 000000000..8c0dfab8c
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/types/config.go
@@ -0,0 +1,776 @@
+/*
+Ginkgo accepts a number of configuration options.
+These are documented [here](http://onsi.github.io/ginkgo/#the-ginkgo-cli)
+*/
+
+package types
+
+import (
+ "flag"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// Configuration controlling how an individual test suite is run
+type SuiteConfig struct {
+ RandomSeed int64
+ RandomizeAllSpecs bool
+ FocusStrings []string
+ SkipStrings []string
+ FocusFiles []string
+ SkipFiles []string
+ LabelFilter string
+ FailOnPending bool
+ FailOnEmpty bool
+ FailFast bool
+ FlakeAttempts int
+ MustPassRepeatedly int
+ DryRun bool
+ PollProgressAfter time.Duration
+ PollProgressInterval time.Duration
+ Timeout time.Duration
+ EmitSpecProgress bool // this is deprecated but its removal causes compile issues for some users that were setting it manually
+ OutputInterceptorMode string
+ SourceRoots []string
+ GracePeriod time.Duration
+
+ ParallelProcess int
+ ParallelTotal int
+ ParallelHost string
+}
+
+func NewDefaultSuiteConfig() SuiteConfig {
+ return SuiteConfig{
+ RandomSeed: time.Now().Unix(),
+ Timeout: time.Hour,
+ ParallelProcess: 1,
+ ParallelTotal: 1,
+ GracePeriod: 30 * time.Second,
+ }
+}
+
+type VerbosityLevel uint
+
+const (
+ VerbosityLevelSuccinct VerbosityLevel = iota
+ VerbosityLevelNormal
+ VerbosityLevelVerbose
+ VerbosityLevelVeryVerbose
+)
+
+func (vl VerbosityLevel) GT(comp VerbosityLevel) bool {
+ return vl > comp
+}
+
+func (vl VerbosityLevel) GTE(comp VerbosityLevel) bool {
+ return vl >= comp
+}
+
+func (vl VerbosityLevel) Is(comp VerbosityLevel) bool {
+ return vl == comp
+}
+
+func (vl VerbosityLevel) LTE(comp VerbosityLevel) bool {
+ return vl <= comp
+}
+
+func (vl VerbosityLevel) LT(comp VerbosityLevel) bool {
+ return vl < comp
+}
+
+// Configuration for Ginkgo's reporter
+type ReporterConfig struct {
+ NoColor bool
+ Succinct bool
+ Verbose bool
+ VeryVerbose bool
+ FullTrace bool
+ ShowNodeEvents bool
+ GithubOutput bool
+ SilenceSkips bool
+ ForceNewlines bool
+
+ JSONReport string
+ JUnitReport string
+ TeamcityReport string
+}
+
+func (rc ReporterConfig) Verbosity() VerbosityLevel {
+ if rc.Succinct {
+ return VerbosityLevelSuccinct
+ } else if rc.Verbose {
+ return VerbosityLevelVerbose
+ } else if rc.VeryVerbose {
+ return VerbosityLevelVeryVerbose
+ }
+ return VerbosityLevelNormal
+}
+
+func (rc ReporterConfig) WillGenerateReport() bool {
+ return rc.JSONReport != "" || rc.JUnitReport != "" || rc.TeamcityReport != ""
+}
+
+func NewDefaultReporterConfig() ReporterConfig {
+ return ReporterConfig{}
+}
+
+// Configuration for the Ginkgo CLI
+type CLIConfig struct {
+ //for build, run, and watch
+ Recurse bool
+ SkipPackage string
+ RequireSuite bool
+ NumCompilers int
+
+ //for run and watch only
+ Procs int
+ Parallel bool
+ AfterRunHook string
+ OutputDir string
+ KeepSeparateCoverprofiles bool
+ KeepSeparateReports bool
+
+ //for run only
+ KeepGoing bool
+ UntilItFails bool
+ Repeat int
+ RandomizeSuites bool
+
+ //for watch only
+ Depth int
+ WatchRegExp string
+}
+
+func NewDefaultCLIConfig() CLIConfig {
+ return CLIConfig{
+ Depth: 1,
+ WatchRegExp: `\.go$`,
+ }
+}
+
+func (g CLIConfig) ComputedProcs() int {
+ if g.Procs > 0 {
+ return g.Procs
+ }
+
+ n := 1
+ if g.Parallel {
+ n = runtime.NumCPU()
+ if n > 4 {
+ n = n - 1
+ }
+ }
+ return n
+}
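+
+// Editor's note, not upstream Ginkgo: for example, on an 8-CPU machine
+// `ginkgo -p` (Parallel set, no explicit Procs) computes 7 processes, while
+// `ginkgo -procs=4` forces exactly 4.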
+
+func (g CLIConfig) ComputedNumCompilers() int {
+ if g.NumCompilers > 0 {
+ return g.NumCompilers
+ }
+
+ return runtime.NumCPU()
+}
+
+// Configuration for the Ginkgo CLI capturing available go flags
+// A subset of Go flags are exposed by Ginkgo. Some are available at compile time (e.g. ginkgo build) and others only at run time (e.g. ginkgo run - which has both build and run time flags).
+// More details can be found at:
+// https://docs.google.com/spreadsheets/d/1zkp-DS4hU4sAJl5eHh1UmgwxCPQhf3s5a8fbiOI8tJU/
+type GoFlagsConfig struct {
+ //build-time flags for code-and-performance analysis
+ Race bool
+ Cover bool
+ CoverMode string
+ CoverPkg string
+ Vet string
+
+ //run-time flags for code-and-performance analysis
+ BlockProfile string
+ BlockProfileRate int
+ CoverProfile string
+ CPUProfile string
+ MemProfile string
+ MemProfileRate int
+ MutexProfile string
+ MutexProfileFraction int
+ Trace string
+
+ //build-time flags for building
+ A bool
+ ASMFlags string
+ BuildMode string
+ BuildVCS bool
+ Compiler string
+ GCCGoFlags string
+ GCFlags string
+ InstallSuffix string
+ LDFlags string
+ LinkShared bool
+ Mod string
+ N bool
+ ModFile string
+ ModCacheRW bool
+ MSan bool
+ PkgDir string
+ Tags string
+ TrimPath bool
+ ToolExec string
+ Work bool
+ X bool
+ O string
+}
+
+func NewDefaultGoFlagsConfig() GoFlagsConfig {
+ return GoFlagsConfig{}
+}
+
+func (g GoFlagsConfig) BinaryMustBePreserved() bool {
+ return g.BlockProfile != "" || g.CPUProfile != "" || g.MemProfile != "" || g.MutexProfile != ""
+}
+
+// Configuration options that were deprecated in 2.0
+type deprecatedConfig struct {
+ DebugParallel bool
+ NoisySkippings bool
+ NoisyPendings bool
+ RegexScansFilePath bool
+ SlowSpecThresholdWithFLoatUnits float64
+ Stream bool
+ Notify bool
+ EmitSpecProgress bool
+ SlowSpecThreshold time.Duration
+ AlwaysEmitGinkgoWriter bool
+}
+
+// Flags
+
+// Flags sections used by both the CLI and the Ginkgo test process
+var FlagSections = GinkgoFlagSections{
+ {Key: "multiple-suites", Style: "{{dark-green}}", Heading: "Running Multiple Test Suites"},
+ {Key: "order", Style: "{{green}}", Heading: "Controlling Test Order"},
+ {Key: "parallel", Style: "{{yellow}}", Heading: "Controlling Test Parallelism"},
+ {Key: "low-level-parallel", Style: "{{yellow}}", Heading: "Controlling Test Parallelism",
+ Description: "These are set by the Ginkgo CLI, {{red}}{{bold}}do not set them manually{{/}} via go test.\nUse ginkgo -p or ginkgo -procs=N instead."},
+ {Key: "filter", Style: "{{cyan}}", Heading: "Filtering Tests"},
+ {Key: "failure", Style: "{{red}}", Heading: "Failure Handling"},
+ {Key: "output", Style: "{{magenta}}", Heading: "Controlling Output Formatting"},
+ {Key: "code-and-coverage-analysis", Style: "{{orange}}", Heading: "Code and Coverage Analysis"},
+ {Key: "performance-analysis", Style: "{{coral}}", Heading: "Performance Analysis"},
+ {Key: "debug", Style: "{{blue}}", Heading: "Debugging Tests",
+ Description: "In addition to these flags, Ginkgo supports a few debugging environment variables. To change the parallel server protocol set {{blue}}GINKGO_PARALLEL_PROTOCOL{{/}} to {{bold}}HTTP{{/}}. To avoid pruning callstacks set {{blue}}GINKGO_PRUNE_STACK{{/}} to {{bold}}FALSE{{/}}."},
+ {Key: "watch", Style: "{{light-yellow}}", Heading: "Controlling Ginkgo Watch"},
+ {Key: "misc", Style: "{{light-gray}}", Heading: "Miscellaneous"},
+ {Key: "go-build", Style: "{{light-gray}}", Heading: "Go Build Flags", Succinct: true,
+ Description: "These flags are inherited from go build. Run {{bold}}ginkgo help build{{/}} for more detailed flag documentation."},
+}
+
+// SuiteConfigFlags provides flags for both the Ginkgo test process and the CLI
+var SuiteConfigFlags = GinkgoFlags{
+ {KeyPath: "S.RandomSeed", Name: "seed", SectionKey: "order", UsageDefaultValue: "randomly generated by Ginkgo",
+ Usage: "The seed used to randomize the spec suite.", AlwaysExport: true},
+ {KeyPath: "S.RandomizeAllSpecs", Name: "randomize-all", SectionKey: "order", DeprecatedName: "randomizeAllSpecs", DeprecatedDocLink: "changed-command-line-flags",
+ Usage: "If set, ginkgo will randomize all specs together. By default, ginkgo only randomizes the top level Describe, Context and When containers."},
+
+ {KeyPath: "S.FailOnPending", Name: "fail-on-pending", SectionKey: "failure", DeprecatedName: "failOnPending", DeprecatedDocLink: "changed-command-line-flags",
+ Usage: "If set, ginkgo will mark the test suite as failed if any specs are pending."},
+ {KeyPath: "S.FailFast", Name: "fail-fast", SectionKey: "failure", DeprecatedName: "failFast", DeprecatedDocLink: "changed-command-line-flags",
+ Usage: "If set, ginkgo will stop running a test suite after a failure occurs."},
+ {KeyPath: "S.FlakeAttempts", Name: "flake-attempts", SectionKey: "failure", UsageDefaultValue: "0 - failed tests are not retried", DeprecatedName: "flakeAttempts", DeprecatedDocLink: "changed-command-line-flags",
+ Usage: "Make up to this many attempts to run each spec. If any of the attempts succeed, the suite will not be failed."},
+ {KeyPath: "S.FailOnEmpty", Name: "fail-on-empty", SectionKey: "failure",
+ Usage: "If set, ginkgo will mark the test suite as failed if no specs are run."},
+
+ {KeyPath: "S.DryRun", Name: "dry-run", SectionKey: "debug", DeprecatedName: "dryRun", DeprecatedDocLink: "changed-command-line-flags",
+ Usage: "If set, ginkgo will walk the test hierarchy without actually running anything. Best paired with -v."},
+ {KeyPath: "S.PollProgressAfter", Name: "poll-progress-after", SectionKey: "debug", UsageDefaultValue: "0",
+ Usage: "Emit node progress reports periodically if node hasn't completed after this duration."},
+ {KeyPath: "S.PollProgressInterval", Name: "poll-progress-interval", SectionKey: "debug", UsageDefaultValue: "10s",
+ Usage: "The rate at which to emit node progress reports after poll-progress-after has elapsed."},
+ {KeyPath: "S.SourceRoots", Name: "source-root", SectionKey: "debug",
+ Usage: "The location to look for source code when generating progress reports. You can pass multiple --source-root flags."},
+ {KeyPath: "S.Timeout", Name: "timeout", SectionKey: "debug", UsageDefaultValue: "1h",
+ Usage: "Test suite fails if it does not complete within the specified timeout."},
+ {KeyPath: "S.GracePeriod", Name: "grace-period", SectionKey: "debug", UsageDefaultValue: "30s",
+ Usage: "When interrupted, Ginkgo will wait for GracePeriod for the current running node to exit before moving on to the next one."},
+ {KeyPath: "S.OutputInterceptorMode", Name: "output-interceptor-mode", SectionKey: "debug", UsageArgument: "dup, swap, or none",
+ Usage: "If set, ginkgo will use the specified output interception strategy when running in parallel. Defaults to dup on unix and swap on windows."},
+
+ {KeyPath: "S.LabelFilter", Name: "label-filter", SectionKey: "filter", UsageArgument: "expression",
+ Usage: "If set, ginkgo will only run specs with labels that match the label-filter. The passed-in expression can include boolean operations (!, &&, ||, ','), groupings via '()', and regular expressions '/regexp/'. e.g. '(cat || dog) && !fruit'"},
+ {KeyPath: "S.FocusStrings", Name: "focus", SectionKey: "filter",
+ Usage: "If set, ginkgo will only run specs that match this regular expression. Can be specified multiple times, values are ORed."},
+ {KeyPath: "S.SkipStrings", Name: "skip", SectionKey: "filter",
+ Usage: "If set, ginkgo will only run specs that do not match this regular expression. Can be specified multiple times, values are ORed."},
+ {KeyPath: "S.FocusFiles", Name: "focus-file", SectionKey: "filter", UsageArgument: "file (regexp) | file:line | file:lineA-lineB | file:line,line,line",
+ Usage: "If set, ginkgo will only run specs in matching files. Can be specified multiple times, values are ORed."},
+ {KeyPath: "S.SkipFiles", Name: "skip-file", SectionKey: "filter", UsageArgument: "file (regexp) | file:line | file:lineA-lineB | file:line,line,line",
+ Usage: "If set, ginkgo will skip specs in matching files. Can be specified multiple times, values are ORed."},
+
+ {KeyPath: "D.RegexScansFilePath", DeprecatedName: "regexScansFilePath", DeprecatedDocLink: "removed--regexscansfilepath", DeprecatedVersion: "2.0.0"},
+ {KeyPath: "D.DebugParallel", DeprecatedName: "debug", DeprecatedDocLink: "removed--debug", DeprecatedVersion: "2.0.0"},
+ {KeyPath: "D.EmitSpecProgress", DeprecatedName: "progress", SectionKey: "debug",
+ DeprecatedVersion: "2.5.0", Usage: ". The functionality provided by --progress was confusing and is no longer needed. Use --show-node-events instead to see node entry and exit events included in the timeline of failed and verbose specs. Or you can run with -vv to always see all node events. Lastly, --poll-progress-after and the PollProgressAfter decorator now provide a better mechanism for debugging specs that tend to get stuck."},
+}
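+
+// Illustrative sketch (flag names taken from the definitions above; values
+// are hypothetical): on the ginkgo CLI these surface directly, e.g.
+//
+//	ginkgo --seed=17 --fail-fast --label-filter='(cat || dog) && !fruit'
+//
+// and, once prefixed with "ginkgo" (see GenerateGinkgoTestRunArgs below),
+// they reach a compiled test binary as --ginkgo.seed=17, etc.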
+
+// ParallelConfigFlags provides flags for the Ginkgo test process (not the CLI)
+var ParallelConfigFlags = GinkgoFlags{
+ {KeyPath: "S.ParallelProcess", Name: "parallel.process", SectionKey: "low-level-parallel", UsageDefaultValue: "1",
+ Usage: "This worker process's (one-indexed) process number. For running specs in parallel."},
+ {KeyPath: "S.ParallelTotal", Name: "parallel.total", SectionKey: "low-level-parallel", UsageDefaultValue: "1",
+ Usage: "The total number of worker processes. For running specs in parallel."},
+ {KeyPath: "S.ParallelHost", Name: "parallel.host", SectionKey: "low-level-parallel", UsageDefaultValue: "set by Ginkgo CLI",
+ Usage: "The address for the server that will synchronize the processes."},
+}
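+
+// Illustrative sketch of the low-level parallel flags as the Ginkgo CLI
+// hands them to each worker process (values hypothetical):
+//
+//	./suite.test --ginkgo.parallel.process=2 --ginkgo.parallel.total=4 --ginkgo.parallel.host=127.0.0.1:12345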
+
+// ReporterConfigFlags provides flags for both the Ginkgo test process and the CLI
+var ReporterConfigFlags = GinkgoFlags{
+ {KeyPath: "R.NoColor", Name: "no-color", SectionKey: "output", DeprecatedName: "noColor", DeprecatedDocLink: "changed-command-line-flags",
+ Usage: "If set, suppress color output in default reporter. You can also set the environment variable GINKGO_NO_COLOR=TRUE"},
+ {KeyPath: "R.Verbose", Name: "v", SectionKey: "output",
+ Usage: "If set, emits more output including GinkgoWriter contents."},
+ {KeyPath: "R.VeryVerbose", Name: "vv", SectionKey: "output",
+ Usage: "If set, emits with maximal verbosity - includes skipped and pending tests."},
+ {KeyPath: "R.Succinct", Name: "succinct", SectionKey: "output",
+ Usage: "If set, default reporter prints out a very succinct report"},
+ {KeyPath: "R.FullTrace", Name: "trace", SectionKey: "output",
+ Usage: "If set, default reporter prints out the full stack trace when a failure occurs"},
+ {KeyPath: "R.ShowNodeEvents", Name: "show-node-events", SectionKey: "output",
+ Usage: "If set, default reporter prints node > Enter and < Exit events when specs fail"},
+ {KeyPath: "R.GithubOutput", Name: "github-output", SectionKey: "output",
+ Usage: "If set, default reporter prints easier to manage output in Github Actions."},
+ {KeyPath: "R.SilenceSkips", Name: "silence-skips", SectionKey: "output",
+ Usage: "If set, default reporter will not print out skipped tests."},
+ {KeyPath: "R.ForceNewlines", Name: "force-newlines", SectionKey: "output",
+ Usage: "If set, default reporter will ensure a newline appears after each test."},
+
+ {KeyPath: "R.JSONReport", Name: "json-report", UsageArgument: "filename.json", SectionKey: "output",
+ Usage: "If set, Ginkgo will generate a JSON-formatted test report at the specified location."},
+ {KeyPath: "R.JUnitReport", Name: "junit-report", UsageArgument: "filename.xml", SectionKey: "output", DeprecatedName: "reportFile", DeprecatedDocLink: "improved-reporting-infrastructure",
+ Usage: "If set, Ginkgo will generate a conformant junit test report in the specified file."},
+ {KeyPath: "R.TeamcityReport", Name: "teamcity-report", UsageArgument: "filename", SectionKey: "output",
+ Usage: "If set, Ginkgo will generate a Teamcity-formatted test report at the specified location."},
+
+ {KeyPath: "D.SlowSpecThresholdWithFLoatUnits", DeprecatedName: "slowSpecThreshold", DeprecatedDocLink: "changed--slowspecthreshold",
+ Usage: "use --slow-spec-threshold instead and pass in a duration string (e.g. '5s', not '5.0')"},
+ {KeyPath: "D.NoisyPendings", DeprecatedName: "noisyPendings", DeprecatedDocLink: "removed--noisypendings-and--noisyskippings", DeprecatedVersion: "2.0.0"},
+ {KeyPath: "D.NoisySkippings", DeprecatedName: "noisySkippings", DeprecatedDocLink: "removed--noisypendings-and--noisyskippings", DeprecatedVersion: "2.0.0"},
+ {KeyPath: "D.SlowSpecThreshold", DeprecatedName: "slow-spec-threshold", SectionKey: "output", Usage: "--slow-spec-threshold has been deprecated and will be removed in a future version of Ginkgo. This feature has proved to be more noisy than useful. You can use --poll-progress-after, instead, to get more actionable feedback about potentially slow specs and understand where they might be getting stuck.", DeprecatedVersion: "2.5.0"},
+ {KeyPath: "D.AlwaysEmitGinkgoWriter", DeprecatedName: "always-emit-ginkgo-writer", SectionKey: "output", Usage: " - use -v instead, or one of Ginkgo's machine-readable report formats to get GinkgoWriter output for passing specs."},
+}
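+
+// Illustrative sketch combining reporter flags in a single CLI invocation:
+//
+//	ginkgo -v --json-report=report.json --junit-report=report.xml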
+
+// BuildTestSuiteFlagSet attaches to the CommandLine flagset and provides flags for the Ginkgo test process
+func BuildTestSuiteFlagSet(suiteConfig *SuiteConfig, reporterConfig *ReporterConfig) (GinkgoFlagSet, error) {
+ flags := SuiteConfigFlags.CopyAppend(ParallelConfigFlags...).CopyAppend(ReporterConfigFlags...)
+ flags = flags.WithPrefix("ginkgo")
+ bindings := map[string]interface{}{
+ "S": suiteConfig,
+ "R": reporterConfig,
+ "D": &deprecatedConfig{},
+ }
+ extraGoFlagsSection := GinkgoFlagSection{Style: "{{gray}}", Heading: "Go test flags"}
+
+ return NewAttachedGinkgoFlagSet(flag.CommandLine, flags, bindings, FlagSections, extraGoFlagsSection)
+}
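+
+// A minimal usage sketch (zero-valued configs shown for brevity; real
+// callers start from the package's default configs):
+//
+//	var suiteConfig SuiteConfig
+//	var reporterConfig ReporterConfig
+//	flagSet, err := BuildTestSuiteFlagSet(&suiteConfig, &reporterConfig)
+//	// after flag.Parse(), --ginkgo.* flags populate suiteConfig and reporterConfig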
+
+// VetConfig validates that the Ginkgo test process' configuration is sound
+func VetConfig(flagSet GinkgoFlagSet, suiteConfig SuiteConfig, reporterConfig ReporterConfig) []error {
+ errors := []error{}
+
+ if flagSet.WasSet("count") || flagSet.WasSet("test.count") {
+ flag := flagSet.Lookup("count")
+ if flag == nil {
+ flag = flagSet.Lookup("test.count")
+ }
+ count, err := strconv.Atoi(flag.Value.String())
+ if err != nil || count != 1 {
+ errors = append(errors, GinkgoErrors.InvalidGoFlagCount())
+ }
+ }
+
+ if flagSet.WasSet("parallel") || flagSet.WasSet("test.parallel") {
+ errors = append(errors, GinkgoErrors.InvalidGoFlagParallel())
+ }
+
+ if suiteConfig.ParallelTotal < 1 {
+ errors = append(errors, GinkgoErrors.InvalidParallelTotalConfiguration())
+ }
+
+ if suiteConfig.ParallelProcess > suiteConfig.ParallelTotal || suiteConfig.ParallelProcess < 1 {
+ errors = append(errors, GinkgoErrors.InvalidParallelProcessConfiguration())
+ }
+
+ if suiteConfig.ParallelTotal > 1 && suiteConfig.ParallelHost == "" {
+ errors = append(errors, GinkgoErrors.MissingParallelHostConfiguration())
+ }
+
+ if suiteConfig.DryRun && suiteConfig.ParallelTotal > 1 {
+ errors = append(errors, GinkgoErrors.DryRunInParallelConfiguration())
+ }
+
+ if suiteConfig.GracePeriod <= 0 {
+ errors = append(errors, GinkgoErrors.GracePeriodCannotBeZero())
+ }
+
+ if len(suiteConfig.FocusFiles) > 0 {
+ _, err := ParseFileFilters(suiteConfig.FocusFiles)
+ if err != nil {
+ errors = append(errors, err)
+ }
+ }
+
+ if len(suiteConfig.SkipFiles) > 0 {
+ _, err := ParseFileFilters(suiteConfig.SkipFiles)
+ if err != nil {
+ errors = append(errors, err)
+ }
+ }
+
+ if suiteConfig.LabelFilter != "" {
+ _, err := ParseLabelFilter(suiteConfig.LabelFilter)
+ if err != nil {
+ errors = append(errors, err)
+ }
+ }
+
+ switch strings.ToLower(suiteConfig.OutputInterceptorMode) {
+ case "", "dup", "swap", "none":
+ default:
+ errors = append(errors, GinkgoErrors.InvalidOutputInterceptorModeConfiguration(suiteConfig.OutputInterceptorMode))
+ }
+
+ numVerbosity := 0
+ for _, v := range []bool{reporterConfig.Succinct, reporterConfig.Verbose, reporterConfig.VeryVerbose} {
+ if v {
+ numVerbosity++
+ }
+ }
+ if numVerbosity > 1 {
+ errors = append(errors, GinkgoErrors.ConflictingVerbosityConfiguration())
+ }
+
+ return errors
+}
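+
+// Illustrative sketch of a validation failure: asking for more than one
+// verbosity level at once is rejected,
+//
+//	reporterConfig.Verbose, reporterConfig.VeryVerbose = true, true
+//	errs := VetConfig(flagSet, suiteConfig, reporterConfig)
+//	// errs contains GinkgoErrors.ConflictingVerbosityConfiguration()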
+
+// GinkgoCLISharedFlags provides flags shared by the Ginkgo CLI's build, watch, and run commands
+var GinkgoCLISharedFlags = GinkgoFlags{
+ {KeyPath: "C.Recurse", Name: "r", SectionKey: "multiple-suites",
+ Usage: "If set, ginkgo finds and runs test suites under the current directory recursively."},
+ {KeyPath: "C.SkipPackage", Name: "skip-package", SectionKey: "multiple-suites", DeprecatedName: "skipPackage", DeprecatedDocLink: "changed-command-line-flags",
+ UsageArgument: "comma-separated list of packages",
+ Usage: "A comma-separated list of package names to be skipped. If any part of the package's path matches, that package is ignored."},
+ {KeyPath: "C.RequireSuite", Name: "require-suite", SectionKey: "failure", DeprecatedName: "requireSuite", DeprecatedDocLink: "changed-command-line-flags",
+ Usage: "If set, Ginkgo fails if there are ginkgo tests in a directory but no invocation of RunSpecs."},
+ {KeyPath: "C.NumCompilers", Name: "compilers", SectionKey: "multiple-suites", UsageDefaultValue: "0 (will autodetect)",
+ Usage: "When running multiple packages, the number of concurrent compilations to perform."},
+}
+
+// GinkgoCLIRunAndWatchFlags provides flags shared by the Ginkgo CLI's run and watch commands (but not build)
+var GinkgoCLIRunAndWatchFlags = GinkgoFlags{
+ {KeyPath: "C.Procs", Name: "procs", SectionKey: "parallel", UsageDefaultValue: "1 (run in series)",
+ Usage: "The number of parallel test nodes to run."},
+ {KeyPath: "C.Procs", Name: "nodes", SectionKey: "parallel", UsageDefaultValue: "1 (run in series)",
+ Usage: "--nodes is an alias for --procs"},
+ {KeyPath: "C.Parallel", Name: "p", SectionKey: "parallel",
+ Usage: "If set, ginkgo will run in parallel with an auto-detected number of nodes."},
+ {KeyPath: "C.AfterRunHook", Name: "after-run-hook", SectionKey: "misc", DeprecatedName: "afterSuiteHook", DeprecatedDocLink: "changed-command-line-flags",
+ Usage: "Command to run when a test suite completes."},
+ {KeyPath: "C.OutputDir", Name: "output-dir", SectionKey: "output", UsageArgument: "directory", DeprecatedName: "outputdir", DeprecatedDocLink: "improved-profiling-support",
+ Usage: "A location to place all generated profiles and reports."},
+ {KeyPath: "C.KeepSeparateCoverprofiles", Name: "keep-separate-coverprofiles", SectionKey: "code-and-coverage-analysis",
+ Usage: "If set, Ginkgo does not merge coverprofiles into one monolithic coverprofile. The coverprofiles will remain in their respective package directories or in -output-dir if set."},
+ {KeyPath: "C.KeepSeparateReports", Name: "keep-separate-reports", SectionKey: "output",
+ Usage: "If set, Ginkgo does not merge per-suite reports (e.g. -json-report) into one monolithic report for the entire testrun. The reports will remain in their respective package directories or in -output-dir if set."},
+
+ {KeyPath: "D.Stream", DeprecatedName: "stream", DeprecatedDocLink: "removed--stream", DeprecatedVersion: "2.0.0"},
+ {KeyPath: "D.Notify", DeprecatedName: "notify", DeprecatedDocLink: "removed--notify", DeprecatedVersion: "2.0.0"},
+}
+
+// GinkgoCLIRunFlags provides flags for Ginkgo CLI's run command that aren't shared by any other commands
+var GinkgoCLIRunFlags = GinkgoFlags{
+ {KeyPath: "C.KeepGoing", Name: "keep-going", SectionKey: "multiple-suites", DeprecatedName: "keepGoing", DeprecatedDocLink: "changed-command-line-flags",
+ Usage: "If set, failures from earlier test suites do not prevent later test suites from running."},
+ {KeyPath: "C.UntilItFails", Name: "until-it-fails", SectionKey: "debug", DeprecatedName: "untilItFails", DeprecatedDocLink: "changed-command-line-flags",
+ Usage: "If set, ginkgo will keep rerunning test suites until a failure occurs."},
+ {KeyPath: "C.Repeat", Name: "repeat", SectionKey: "debug", UsageArgument: "n", UsageDefaultValue: "0 - i.e. no repetition, run only once",
+ Usage: "The number of times to re-run a test-suite. Useful for debugging flaky tests. If set to N the suite will be run N+1 times and will be required to pass each time."},
+ {KeyPath: "C.RandomizeSuites", Name: "randomize-suites", SectionKey: "order", DeprecatedName: "randomizeSuites", DeprecatedDocLink: "changed-command-line-flags",
+ Usage: "If set, ginkgo will randomize the order in which test suites run."},
+}
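+
+// Illustrative run-command invocation (a sketch): rerun the suites up to
+// 4 times total (N+1), in a randomized suite order:
+//
+//	ginkgo --repeat=3 --randomize-suites ./...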
+
+// GinkgoCLIWatchFlags provides flags for Ginkgo CLI's watch command that aren't shared by any other commands
+var GinkgoCLIWatchFlags = GinkgoFlags{
+ {KeyPath: "C.Depth", Name: "depth", SectionKey: "watch",
+ Usage: "Ginkgo will watch dependencies down to this depth in the dependency tree."},
+ {KeyPath: "C.WatchRegExp", Name: "watch-regexp", SectionKey: "watch", DeprecatedName: "watchRegExp", DeprecatedDocLink: "changed-command-line-flags",
+ UsageArgument: "Regular Expression",
+ UsageDefaultValue: `\.go$`,
+ Usage: "Only files matching this regular expression will be watched for changes."},
+}
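+
+// Illustrative watch-command invocation using these flags (a sketch):
+//
+//	ginkgo watch -r --depth=2 --watch-regexp='\.go$'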
+
+// GoBuildFlags provides flags for the Ginkgo CLI build, run, and watch commands that capture go's build-time flags. These are passed to go test -c by the ginkgo CLI
+var GoBuildFlags = GinkgoFlags{
+ {KeyPath: "Go.Race", Name: "race", SectionKey: "code-and-coverage-analysis",
+ Usage: "enable data race detection. Supported on linux/amd64, linux/ppc64le, linux/arm64, linux/s390x, freebsd/amd64, netbsd/amd64, darwin/amd64, darwin/arm64, and windows/amd64."},
+ {KeyPath: "Go.Vet", Name: "vet", UsageArgument: "list", SectionKey: "code-and-coverage-analysis",
+ Usage: `Configure the invocation of "go vet" during "go test" to use the comma-separated list of vet checks. If list is empty, "go test" runs "go vet" with a curated list of checks believed to be always worth addressing. If list is "off", "go test" does not run "go vet" at all. Available checks can be found by running 'go doc cmd/vet'`},
+ {KeyPath: "Go.Cover", Name: "cover", SectionKey: "code-and-coverage-analysis",
+ Usage: "Enable coverage analysis. Note that because coverage works by annotating the source code before compilation, compilation and test failures with coverage enabled may report line numbers that don't correspond to the original sources."},
+ {KeyPath: "Go.CoverMode", Name: "covermode", UsageArgument: "set,count,atomic", SectionKey: "code-and-coverage-analysis",
+ Usage: `Set the mode for coverage analysis for the package[s] being tested. 'set': does this statement run? 'count': how many times does this statement run? 'atomic': like count, but correct in multithreaded tests and more expensive (must use atomic with -race). Sets -cover`},
+ {KeyPath: "Go.CoverPkg", Name: "coverpkg", UsageArgument: "pattern1,pattern2,pattern3", SectionKey: "code-and-coverage-analysis",
+ Usage: "Apply coverage analysis in each test to packages matching the patterns. The default is for each test to analyze only the package being tested. See 'go help packages' for a description of package patterns. Sets -cover."},
+
+ {KeyPath: "Go.A", Name: "a", SectionKey: "go-build",
+ Usage: "force rebuilding of packages that are already up-to-date."},
+ {KeyPath: "Go.ASMFlags", Name: "asmflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build",
+ Usage: "arguments to pass on each go tool asm invocation."},
+ {KeyPath: "Go.BuildMode", Name: "buildmode", UsageArgument: "mode", SectionKey: "go-build",
+ Usage: "build mode to use. See 'go help buildmode' for more."},
+ {KeyPath: "Go.BuildVCS", Name: "buildvcs", SectionKey: "go-build",
+ Usage: "adds version control information."},
+ {KeyPath: "Go.Compiler", Name: "compiler", UsageArgument: "name", SectionKey: "go-build",
+ Usage: "name of compiler to use, as in runtime.Compiler (gccgo or gc)."},
+ {KeyPath: "Go.GCCGoFlags", Name: "gccgoflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build",
+ Usage: "arguments to pass on each gccgo compiler/linker invocation."},
+ {KeyPath: "Go.GCFlags", Name: "gcflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build",
+ Usage: "arguments to pass on each go tool compile invocation."},
+ {KeyPath: "Go.InstallSuffix", Name: "installsuffix", SectionKey: "go-build",
+ Usage: "a suffix to use in the name of the package installation directory, in order to keep output separate from default builds. If using the -race flag, the install suffix is automatically set to raceor, if set explicitly, has _race appended to it. Likewise for the -msan flag. Using a -buildmode option that requires non-default compile flags has a similar effect."},
+ {KeyPath: "Go.LDFlags", Name: "ldflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build",
+ Usage: "arguments to pass on each go tool link invocation."},
+ {KeyPath: "Go.LinkShared", Name: "linkshared", SectionKey: "go-build",
+ Usage: "build code that will be linked against shared libraries previously created with -buildmode=shared."},
+ {KeyPath: "Go.Mod", Name: "mod", UsageArgument: "mode (readonly, vendor, or mod)", SectionKey: "go-build",
+ Usage: "module download mode to use: readonly, vendor, or mod. See 'go help modules' for more."},
+ {KeyPath: "Go.ModCacheRW", Name: "modcacherw", SectionKey: "go-build",
+ Usage: "leave newly-created directories in the module cache read-write instead of making them read-only."},
+ {KeyPath: "Go.ModFile", Name: "modfile", UsageArgument: "file", SectionKey: "go-build",
+ Usage: `in module aware mode, read (and possibly write) an alternate go.mod file instead of the one in the module root directory. A file named go.mod must still be present in order to determine the module root directory, but it is not accessed. When -modfile is specified, an alternate go.sum file is also used: its path is derived from the -modfile flag by trimming the ".mod" extension and appending ".sum".`},
+ {KeyPath: "Go.MSan", Name: "msan", SectionKey: "go-build",
+ Usage: "enable interoperation with memory sanitizer. Supported only on linux/amd64, linux/arm64 and only with Clang/LLVM as the host C compiler. On linux/arm64, pie build mode will be used."},
+ {KeyPath: "Go.N", Name: "n", SectionKey: "go-build",
+ Usage: "print the commands but do not run them."},
+ {KeyPath: "Go.PkgDir", Name: "pkgdir", UsageArgument: "dir", SectionKey: "go-build",
+ Usage: "install and load all packages from dir instead of the usual locations. For example, when building with a non-standard configuration, use -pkgdir to keep generated packages in a separate location."},
+ {KeyPath: "Go.Tags", Name: "tags", UsageArgument: "tag,list", SectionKey: "go-build",
+ Usage: "a comma-separated list of build tags to consider satisfied during the build. For more information about build tags, see the description of build constraints in the documentation for the go/build package. (Earlier versions of Go used a space-separated list, and that form is deprecated but still recognized.)"},
+ {KeyPath: "Go.TrimPath", Name: "trimpath", SectionKey: "go-build",
+ Usage: `remove all file system paths from the resulting executable. Instead of absolute file system paths, the recorded file names will begin with either "go" (for the standard library), or a module path@version (when using modules), or a plain import path (when using GOPATH).`},
+ {KeyPath: "Go.ToolExec", Name: "toolexec", UsageArgument: "'cmd args'", SectionKey: "go-build",
+ Usage: "a program to use to invoke toolchain programs like vet and asm. For example, instead of running asm, the go command will run cmd args /path/to/asm '."},
+ {KeyPath: "Go.Work", Name: "work", SectionKey: "go-build",
+ Usage: "print the name of the temporary work directory and do not delete it when exiting."},
+ {KeyPath: "Go.X", Name: "x", SectionKey: "go-build",
+ Usage: "print the commands."},
+ {KeyPath: "Go.O", Name: "o", SectionKey: "go-build",
+ Usage: "output binary path (including name)."},
+}
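+
+// Illustrative sketch: with Go.Race and Go.Tags set, GenerateGoTestCompileArgs
+// (below) produces arguments along the lines of
+//
+//	go test -c ./mypkg -race -tags=integration
+//
+// (the exact flag rendering is handled by GenerateFlagArgs).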
+
+// GoRunFlags provides flags for the Ginkgo CLI run, and watch commands that capture go's run-time flags. These are passed to the compiled test binary by the ginkgo CLI
+var GoRunFlags = GinkgoFlags{
+ {KeyPath: "Go.CoverProfile", Name: "coverprofile", UsageArgument: "file", SectionKey: "code-and-coverage-analysis",
+ Usage: `Write a coverage profile to the file after all tests have passed. Sets -cover.`},
+ {KeyPath: "Go.BlockProfile", Name: "blockprofile", UsageArgument: "file", SectionKey: "performance-analysis",
+ Usage: `Write a goroutine blocking profile to the specified file when all tests are complete. Preserves test binary.`},
+ {KeyPath: "Go.BlockProfileRate", Name: "blockprofilerate", UsageArgument: "rate", SectionKey: "performance-analysis",
+ Usage: `Control the detail provided in goroutine blocking profiles by calling runtime.SetBlockProfileRate with rate. See 'go doc runtime.SetBlockProfileRate'. The profiler aims to sample, on average, one blocking event every n nanoseconds the program spends blocked. By default, if -test.blockprofile is set without this flag, all blocking events are recorded, equivalent to -test.blockprofilerate=1.`},
+ {KeyPath: "Go.CPUProfile", Name: "cpuprofile", UsageArgument: "file", SectionKey: "performance-analysis",
+ Usage: `Write a CPU profile to the specified file before exiting. Preserves test binary.`},
+ {KeyPath: "Go.MemProfile", Name: "memprofile", UsageArgument: "file", SectionKey: "performance-analysis",
+ Usage: `Write an allocation profile to the file after all tests have passed. Preserves test binary.`},
+ {KeyPath: "Go.MemProfileRate", Name: "memprofilerate", UsageArgument: "rate", SectionKey: "performance-analysis",
+ Usage: `Enable more precise (and expensive) memory allocation profiles by setting runtime.MemProfileRate. See 'go doc runtime.MemProfileRate'. To profile all memory allocations, use -test.memprofilerate=1.`},
+ {KeyPath: "Go.MutexProfile", Name: "mutexprofile", UsageArgument: "file", SectionKey: "performance-analysis",
+ Usage: `Write a mutex contention profile to the specified file when all tests are complete. Preserves test binary.`},
+ {KeyPath: "Go.MutexProfileFraction", Name: "mutexprofilefraction", UsageArgument: "n", SectionKey: "performance-analysis",
+ Usage: `if >= 0, calls runtime.SetMutexProfileFraction() to sample 1 in n stack traces of goroutines holding a contended mutex.`},
+ {KeyPath: "Go.Trace", Name: "execution-trace", UsageArgument: "file", ExportAs: "trace", SectionKey: "performance-analysis",
+ Usage: `Write an execution trace to the specified file before exiting.`},
+}
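+
+// Illustrative sketch: these flags gain the "test" prefix before being
+// handed to the compiled binary (see GenerateGoTestRunArgs below), e.g.
+//
+//	./suite.test --test.coverprofile=coverprofile.out --test.cpuprofile=cpu.out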
+
+// VetAndInitializeCLIAndGoConfig validates that the Ginkgo CLI's configuration is sound
+// It returns a potentially mutated copy of the config that rationalizes the configuration to ensure consistency for downstream consumers
+func VetAndInitializeCLIAndGoConfig(cliConfig CLIConfig, goFlagsConfig GoFlagsConfig) (CLIConfig, GoFlagsConfig, []error) {
+ errors := []error{}
+
+ if cliConfig.Repeat > 0 && cliConfig.UntilItFails {
+ errors = append(errors, GinkgoErrors.BothRepeatAndUntilItFails())
+ }
+
+ //initialize the output directory
+ if cliConfig.OutputDir != "" {
+ err := os.MkdirAll(cliConfig.OutputDir, 0777)
+ if err != nil {
+ errors = append(errors, err)
+ }
+ }
+
+ //ensure cover mode is configured appropriately
+ if goFlagsConfig.CoverMode != "" || goFlagsConfig.CoverPkg != "" || goFlagsConfig.CoverProfile != "" {
+ goFlagsConfig.Cover = true
+ }
+ if goFlagsConfig.Cover && goFlagsConfig.CoverProfile == "" {
+ goFlagsConfig.CoverProfile = "coverprofile.out"
+ }
+
+ return cliConfig, goFlagsConfig, errors
+}
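+
+// A minimal sketch of the cover rationalization above: -covermode,
+// -coverpkg, and -coverprofile each imply -cover, and -cover without an
+// explicit profile falls back to "coverprofile.out":
+//
+//	goFlagsConfig.CoverMode = "atomic"
+//	cliConfig, goFlagsConfig, _ = VetAndInitializeCLIAndGoConfig(cliConfig, goFlagsConfig)
+//	// goFlagsConfig.Cover == true; goFlagsConfig.CoverProfile == "coverprofile.out"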
+
+// GenerateGoTestCompileArgs is used by the Ginkgo CLI to generate command line arguments to pass to the go test -c command when compiling the test
+func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, packageToBuild string, pathToInvocationPath string) ([]string, error) {
+ // if the user has set the CoverProfile run-time flag, also set the build-time
+ // cover flag so that the built test binary can generate a coverprofile
+ if goFlagsConfig.CoverProfile != "" {
+ goFlagsConfig.Cover = true
+ }
+
+ if goFlagsConfig.CoverPkg != "" {
+ coverPkgs := strings.Split(goFlagsConfig.CoverPkg, ",")
+ adjustedCoverPkgs := make([]string, len(coverPkgs))
+ for i, coverPkg := range coverPkgs {
+ coverPkg = strings.Trim(coverPkg, " ")
+ if strings.HasPrefix(coverPkg, "./") {
+ // this is a relative coverPkg - we need to reroot it
+ adjustedCoverPkgs[i] = "./" + filepath.Join(pathToInvocationPath, strings.TrimPrefix(coverPkg, "./"))
+ } else {
+ // this is a package name - don't touch it
+ adjustedCoverPkgs[i] = coverPkg
+ }
+ }
+ goFlagsConfig.CoverPkg = strings.Join(adjustedCoverPkgs, ",")
+ }
+
+ args := []string{"test", "-c", packageToBuild}
+ goArgs, err := GenerateFlagArgs(
+ GoBuildFlags,
+ map[string]interface{}{
+ "Go": &goFlagsConfig,
+ },
+ )
+
+ if err != nil {
+ return []string{}, err
+ }
+ args = append(args, goArgs...)
+ return args, nil
+}
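+
+// Illustrative example of the relative coverpkg rerooting above: with
+// pathToInvocationPath ".." a CoverPkg entry "./util" becomes "./../util",
+// while a plain package name like "k8s.io/client-go" is left untouched.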
+
+// GenerateGinkgoTestRunArgs is used by the Ginkgo CLI to generate command line arguments to pass to the compiled Ginkgo test binary
+func GenerateGinkgoTestRunArgs(suiteConfig SuiteConfig, reporterConfig ReporterConfig, goFlagsConfig GoFlagsConfig) ([]string, error) {
+ var flags GinkgoFlags
+ flags = SuiteConfigFlags.WithPrefix("ginkgo")
+ flags = flags.CopyAppend(ParallelConfigFlags.WithPrefix("ginkgo")...)
+ flags = flags.CopyAppend(ReporterConfigFlags.WithPrefix("ginkgo")...)
+ flags = flags.CopyAppend(GoRunFlags.WithPrefix("test")...)
+ bindings := map[string]interface{}{
+ "S": &suiteConfig,
+ "R": &reporterConfig,
+ "Go": &goFlagsConfig,
+ }
+
+ return GenerateFlagArgs(flags, bindings)
+}
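+
+// Illustrative sketch of a generated invocation (values hypothetical):
+//
+//	./suite.test --ginkgo.seed=17 --ginkgo.parallel.process=1 --ginkgo.parallel.total=1 --test.coverprofile=coverprofile.out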
+
+// GenerateGoTestRunArgs is used by the Ginkgo CLI to generate command line arguments to pass to the compiled non-Ginkgo test binary
+func GenerateGoTestRunArgs(goFlagsConfig GoFlagsConfig) ([]string, error) {
+ flags := GoRunFlags.WithPrefix("test")
+ bindings := map[string]interface{}{
+ "Go": &goFlagsConfig,
+ }
+
+ args, err := GenerateFlagArgs(flags, bindings)
+ if err != nil {
+ return args, err
+ }
+ args = append(args, "--test.v")
+ return args, nil
+}
+
+// BuildRunCommandFlagSet builds the FlagSet for the `ginkgo run` command
+func BuildRunCommandFlagSet(suiteConfig *SuiteConfig, reporterConfig *ReporterConfig, cliConfig *CLIConfig, goFlagsConfig *GoFlagsConfig) (GinkgoFlagSet, error) {
+ flags := SuiteConfigFlags
+ flags = flags.CopyAppend(ReporterConfigFlags...)
+ flags = flags.CopyAppend(GinkgoCLISharedFlags...)
+ flags = flags.CopyAppend(GinkgoCLIRunAndWatchFlags...)
+ flags = flags.CopyAppend(GinkgoCLIRunFlags...)
+ flags = flags.CopyAppend(GoBuildFlags...)
+ flags = flags.CopyAppend(GoRunFlags...)
+
+ bindings := map[string]interface{}{
+ "S": suiteConfig,
+ "R": reporterConfig,
+ "C": cliConfig,
+ "Go": goFlagsConfig,
+ "D": &deprecatedConfig{},
+ }
+
+ return NewGinkgoFlagSet(flags, bindings, FlagSections)
+}
+
+// BuildWatchCommandFlagSet builds the FlagSet for the `ginkgo watch` command
+func BuildWatchCommandFlagSet(suiteConfig *SuiteConfig, reporterConfig *ReporterConfig, cliConfig *CLIConfig, goFlagsConfig *GoFlagsConfig) (GinkgoFlagSet, error) {
+ flags := SuiteConfigFlags
+ flags = flags.CopyAppend(ReporterConfigFlags...)
+ flags = flags.CopyAppend(GinkgoCLISharedFlags...)
+ flags = flags.CopyAppend(GinkgoCLIRunAndWatchFlags...)
+ flags = flags.CopyAppend(GinkgoCLIWatchFlags...)
+ flags = flags.CopyAppend(GoBuildFlags...)
+ flags = flags.CopyAppend(GoRunFlags...)
+
+ bindings := map[string]interface{}{
+ "S": suiteConfig,
+ "R": reporterConfig,
+ "C": cliConfig,
+ "Go": goFlagsConfig,
+ "D": &deprecatedConfig{},
+ }
+
+ return NewGinkgoFlagSet(flags, bindings, FlagSections)
+}
+
+// BuildBuildCommandFlagSet builds the FlagSet for the `ginkgo build` command
+func BuildBuildCommandFlagSet(cliConfig *CLIConfig, goFlagsConfig *GoFlagsConfig) (GinkgoFlagSet, error) {
+ flags := GinkgoCLISharedFlags
+ flags = flags.CopyAppend(GoBuildFlags...)
+
+ bindings := map[string]interface{}{
+ "C": cliConfig,
+ "Go": goFlagsConfig,
+ "D": &deprecatedConfig{},
+ }
+
+ flagSections := make(GinkgoFlagSections, len(FlagSections))
+ copy(flagSections, FlagSections)
+ for i := range flagSections {
+ if flagSections[i].Key == "multiple-suites" {
+ flagSections[i].Heading = "Building Multiple Suites"
+ }
+ if flagSections[i].Key == "go-build" {
+ flagSections[i] = GinkgoFlagSection{Key: "go-build", Style: "{{/}}", Heading: "Go Build Flags",
+ Description: "These flags are inherited from go build."}
+ }
+ }
+
+ return NewGinkgoFlagSet(flags, bindings, flagSections)
+}
+
+// BuildLabelsCommandFlagSet builds the FlagSet for the `ginkgo labels` command
+func BuildLabelsCommandFlagSet(cliConfig *CLIConfig) (GinkgoFlagSet, error) {
+ flags := GinkgoCLISharedFlags.SubsetWithNames("r", "skip-package")
+
+ bindings := map[string]interface{}{
+ "C": cliConfig,
+ }
+
+ flagSections := make(GinkgoFlagSections, len(FlagSections))
+ copy(flagSections, FlagSections)
+ for i := range flagSections {
+ if flagSections[i].Key == "multiple-suites" {
+ flagSections[i].Heading = "Fetching Labels from Multiple Suites"
+ }
+ }
+
+ return NewGinkgoFlagSet(flags, bindings, flagSections)
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go b/vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go
new file mode 100644
index 000000000..17922304b
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go
@@ -0,0 +1,141 @@
+package types
+
+import (
+ "strconv"
+ "time"
+)
+
+/*
+ A set of deprecations to make the transition from v1 to v2 easier for users who have written custom reporters.
+*/
+
+type SuiteSummary = DeprecatedSuiteSummary
+type SetupSummary = DeprecatedSetupSummary
+type SpecSummary = DeprecatedSpecSummary
+type SpecMeasurement = DeprecatedSpecMeasurement
+type SpecComponentType = NodeType
+type SpecFailure = DeprecatedSpecFailure
+
+var (
+ SpecComponentTypeInvalid = NodeTypeInvalid
+ SpecComponentTypeContainer = NodeTypeContainer
+ SpecComponentTypeIt = NodeTypeIt
+ SpecComponentTypeBeforeEach = NodeTypeBeforeEach
+ SpecComponentTypeJustBeforeEach = NodeTypeJustBeforeEach
+ SpecComponentTypeAfterEach = NodeTypeAfterEach
+ SpecComponentTypeJustAfterEach = NodeTypeJustAfterEach
+ SpecComponentTypeBeforeSuite = NodeTypeBeforeSuite
+ SpecComponentTypeSynchronizedBeforeSuite = NodeTypeSynchronizedBeforeSuite
+ SpecComponentTypeAfterSuite = NodeTypeAfterSuite
+ SpecComponentTypeSynchronizedAfterSuite = NodeTypeSynchronizedAfterSuite
+)
+
+type DeprecatedSuiteSummary struct {
+ SuiteDescription string
+ SuiteSucceeded bool
+ SuiteID string
+
+ NumberOfSpecsBeforeParallelization int
+ NumberOfTotalSpecs int
+ NumberOfSpecsThatWillBeRun int
+ NumberOfPendingSpecs int
+ NumberOfSkippedSpecs int
+ NumberOfPassedSpecs int
+ NumberOfFailedSpecs int
+ NumberOfFlakedSpecs int
+ RunTime time.Duration
+}
+
+type DeprecatedSetupSummary struct {
+ ComponentType SpecComponentType
+ CodeLocation CodeLocation
+
+ State SpecState
+ RunTime time.Duration
+ Failure SpecFailure
+
+ CapturedOutput string
+ SuiteID string
+}
+
+type DeprecatedSpecSummary struct {
+ ComponentTexts []string
+ ComponentCodeLocations []CodeLocation
+
+ State SpecState
+ RunTime time.Duration
+ Failure SpecFailure
+ IsMeasurement bool
+ NumberOfSamples int
+ Measurements map[string]*DeprecatedSpecMeasurement
+
+ CapturedOutput string
+ SuiteID string
+}
+
+func (s DeprecatedSpecSummary) HasFailureState() bool {
+ return s.State.Is(SpecStateFailureStates)
+}
+
+func (s DeprecatedSpecSummary) TimedOut() bool {
+ return false
+}
+
+func (s DeprecatedSpecSummary) Panicked() bool {
+ return s.State == SpecStatePanicked
+}
+
+func (s DeprecatedSpecSummary) Failed() bool {
+ return s.State == SpecStateFailed
+}
+
+func (s DeprecatedSpecSummary) Passed() bool {
+ return s.State == SpecStatePassed
+}
+
+func (s DeprecatedSpecSummary) Skipped() bool {
+ return s.State == SpecStateSkipped
+}
+
+func (s DeprecatedSpecSummary) Pending() bool {
+ return s.State == SpecStatePending
+}
+
+type DeprecatedSpecFailure struct {
+ Message string
+ Location CodeLocation
+ ForwardedPanic string
+
+ ComponentIndex int
+ ComponentType SpecComponentType
+ ComponentCodeLocation CodeLocation
+}
+
+type DeprecatedSpecMeasurement struct {
+ Name string
+ Info interface{}
+ Order int
+
+ Results []float64
+
+ Smallest float64
+ Largest float64
+ Average float64
+ StdDeviation float64
+
+ SmallestLabel string
+ LargestLabel string
+ AverageLabel string
+ Units string
+ Precision int
+}
+
+func (s DeprecatedSpecMeasurement) PrecisionFmt() string {
+ if s.Precision == 0 {
+ return "%f"
+ }
+
+ str := strconv.Itoa(s.Precision)
+
+ return "%." + str + "f"
+}
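+
+// Illustrative examples (a sketch):
+//
+//	DeprecatedSpecMeasurement{Precision: 0}.PrecisionFmt() // "%f"
+//	DeprecatedSpecMeasurement{Precision: 3}.PrecisionFmt() // "%.3f"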
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go b/vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go
new file mode 100644
index 000000000..e2519f673
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go
@@ -0,0 +1,177 @@
+package types
+
+import (
+ "os"
+ "strconv"
+ "strings"
+ "sync"
+ "unicode"
+
+ "github.com/onsi/ginkgo/v2/formatter"
+)
+
+type Deprecation struct {
+ Message string
+ DocLink string
+ Version string
+}
+
+type deprecations struct{}
+
+var Deprecations = deprecations{}
+
+func (d deprecations) CustomReporter() Deprecation {
+ return Deprecation{
+ Message: "Support for custom reporters has been removed in V2. Please read the documentation linked to below for Ginkgo's new behavior and for a migration path:",
+ DocLink: "removed-custom-reporters",
+ Version: "1.16.0",
+ }
+}
+
+func (d deprecations) Async() Deprecation {
+ return Deprecation{
+ Message: "You are passing a Done channel to a test node to test asynchronous behavior. This is deprecated in Ginkgo V2. Your test will run synchronously and the timeout will be ignored.",
+ DocLink: "removed-async-testing",
+ Version: "1.16.0",
+ }
+}
+
+func (d deprecations) Measure() Deprecation {
+ return Deprecation{
+ Message: "Measure is deprecated and has been removed from Ginkgo V2. Any Measure tests in your spec will not run. Please migrate to gomega/gmeasure.",
+ DocLink: "removed-measure",
+ Version: "1.16.3",
+ }
+}
+
+func (d deprecations) ParallelNode() Deprecation {
+ return Deprecation{
+ Message: "GinkgoParallelNode is deprecated and will be removed in Ginkgo V2. Please use GinkgoParallelProcess instead.",
+ DocLink: "renamed-ginkgoparallelnode",
+ Version: "1.16.4",
+ }
+}
+
+func (d deprecations) CurrentGinkgoTestDescription() Deprecation {
+ return Deprecation{
+ Message: "CurrentGinkgoTestDescription() is deprecated in Ginkgo V2. Use CurrentSpecReport() instead.",
+ DocLink: "changed-currentginkgotestdescription",
+ Version: "1.16.0",
+ }
+}
+
+func (d deprecations) Convert() Deprecation {
+ return Deprecation{
+ Message: "The convert command is deprecated in Ginkgo V2",
+ DocLink: "removed-ginkgo-convert",
+ Version: "1.16.0",
+ }
+}
+
+func (d deprecations) Blur() Deprecation {
+ return Deprecation{
+ Message: "The blur command is deprecated in Ginkgo V2. Use 'ginkgo unfocus' instead.",
+ Version: "1.16.0",
+ }
+}
+
+func (d deprecations) Nodot() Deprecation {
+ return Deprecation{
+ Message: "The nodot command is deprecated in Ginkgo V2. Please either dot-import Ginkgo or use the package identifier in your code to references objects and types provided by Ginkgo and Gomega.",
+ DocLink: "removed-ginkgo-nodot",
+ Version: "1.16.0",
+ }
+}
+
+func (d deprecations) SuppressProgressReporting() Deprecation {
+ return Deprecation{
+ Message: "Improvements to how reporters emit timeline information means that SuppressProgressReporting is no longer necessary and has been deprecated.",
+ Version: "2.5.0",
+ }
+}
+
+type DeprecationTracker struct {
+ deprecations map[Deprecation][]CodeLocation
+ lock *sync.Mutex
+}
+
+func NewDeprecationTracker() *DeprecationTracker {
+ return &DeprecationTracker{
+ deprecations: map[Deprecation][]CodeLocation{},
+ lock: &sync.Mutex{},
+ }
+}
+
+func (d *DeprecationTracker) TrackDeprecation(deprecation Deprecation, cl ...CodeLocation) {
+ ackVersion := os.Getenv("ACK_GINKGO_DEPRECATIONS")
+ if deprecation.Version != "" && ackVersion != "" {
+ ack := ParseSemVer(ackVersion)
+ version := ParseSemVer(deprecation.Version)
+ if ack.GreaterThanOrEqualTo(version) {
+ return
+ }
+ }
+
+ d.lock.Lock()
+ defer d.lock.Unlock()
+ if len(cl) == 1 {
+ d.deprecations[deprecation] = append(d.deprecations[deprecation], cl[0])
+ } else {
+ d.deprecations[deprecation] = []CodeLocation{}
+ }
+}
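+
+// Illustrative sketch: versioned deprecations can be acknowledged via the
+// environment. With ACK_GINKGO_DEPRECATIONS=2.5.0 a deprecation whose
+// Version is 2.5.0 or lower is silently dropped:
+//
+//	tracker := NewDeprecationTracker()
+//	tracker.TrackDeprecation(Deprecations.SuppressProgressReporting()) // Version "2.5.0"
+//	tracker.DidTrackDeprecations() // false under ACK_GINKGO_DEPRECATIONS=2.5.0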
+
+func (d *DeprecationTracker) DidTrackDeprecations() bool {
+ d.lock.Lock()
+ defer d.lock.Unlock()
+ return len(d.deprecations) > 0
+}
+
+func (d *DeprecationTracker) DeprecationsReport() string {
+ d.lock.Lock()
+ defer d.lock.Unlock()
+ out := formatter.F("{{light-yellow}}You're using deprecated Ginkgo functionality:{{/}}\n")
+ out += formatter.F("{{light-yellow}}============================================={{/}}\n")
+ for deprecation, locations := range d.deprecations {
+ out += formatter.Fi(1, "{{yellow}}"+deprecation.Message+"{{/}}\n")
+ if deprecation.DocLink != "" {
+ out += formatter.Fi(1, "{{bold}}Learn more at:{{/}} {{cyan}}{{underline}}https://onsi.github.io/ginkgo/MIGRATING_TO_V2#%s{{/}}\n", deprecation.DocLink)
+ }
+ for _, location := range locations {
+ out += formatter.Fi(2, "{{gray}}%s{{/}}\n", location)
+ }
+ }
+ out += formatter.F("\n{{gray}}To silence deprecations that can be silenced set the following environment variable:{{/}}\n")
+ out += formatter.Fi(1, "{{gray}}ACK_GINKGO_DEPRECATIONS=%s{{/}}\n", VERSION)
+ return out
+}
+
+type SemVer struct {
+ Major int
+ Minor int
+ Patch int
+}
+
+func (s SemVer) GreaterThanOrEqualTo(o SemVer) bool {
+ return (s.Major > o.Major) ||
+ (s.Major == o.Major && s.Minor > o.Minor) ||
+ (s.Major == o.Major && s.Minor == o.Minor && s.Patch >= o.Patch)
+}
+
+func ParseSemVer(semver string) SemVer {
+ out := SemVer{}
+ semver = strings.TrimFunc(semver, func(r rune) bool {
+ return !(unicode.IsNumber(r) || r == '.')
+ })
+ components := strings.Split(semver, ".")
+ if len(components) > 0 {
+ out.Major, _ = strconv.Atoi(components[0])
+ }
+ if len(components) > 1 {
+ out.Minor, _ = strconv.Atoi(components[1])
+ }
+ if len(components) > 2 {
+ out.Patch, _ = strconv.Atoi(components[2])
+ }
+ return out
+}
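+
+// Illustrative examples of the lenient parsing (a sketch):
+//
+//	ParseSemVer("2.5.0") // SemVer{Major: 2, Minor: 5, Patch: 0}
+//	ParseSemVer("v2.5")  // SemVer{Major: 2, Minor: 5} - the leading "v" is trimmed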
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/enum_support.go b/vendor/github.com/onsi/ginkgo/v2/types/enum_support.go
new file mode 100644
index 000000000..1d96ae028
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/types/enum_support.go
@@ -0,0 +1,43 @@
+package types
+
+import "encoding/json"
+
+type EnumSupport struct {
+ toString map[uint]string
+ toEnum map[string]uint
+ maxEnum uint
+}
+
+func NewEnumSupport(toString map[uint]string) EnumSupport {
+ toEnum, maxEnum := map[string]uint{}, uint(0)
+ for k, v := range toString {
+ toEnum[v] = k
+ if maxEnum < k {
+ maxEnum = k
+ }
+ }
+ return EnumSupport{toString: toString, toEnum: toEnum, maxEnum: maxEnum}
+}
+
+func (es EnumSupport) String(e uint) string {
+ if e > es.maxEnum {
+ return es.toString[0]
+ }
+ return es.toString[e]
+}
+
+func (es EnumSupport) UnmarshJSON(b []byte) (uint, error) {
+ var dec string
+ if err := json.Unmarshal(b, &dec); err != nil {
+ return 0, err
+ }
+ out := es.toEnum[dec] // if we miss we get 0 which is what we want anyway
+ return out, nil
+}
+
+func (es EnumSupport) MarshJSON(e uint) ([]byte, error) {
+ if e == 0 || e > es.maxEnum {
+ return json.Marshal(nil)
+ }
+ return json.Marshal(es.toString[e])
+}
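+
+// Illustrative sketch of EnumSupport, assuming (as with the NodeType enums
+// in this package) that 0 is the invalid value:
+//
+//	es := NewEnumSupport(map[uint]string{0: "INVALID", 1: "passed", 2: "failed"})
+//	es.String(2)    // "failed"
+//	es.MarshJSON(1) // []byte(`"passed"`), nil
+//	es.MarshJSON(0) // []byte("null"), nil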
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/errors.go b/vendor/github.com/onsi/ginkgo/v2/types/errors.go
new file mode 100644
index 000000000..6bb72d00c
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/types/errors.go
@@ -0,0 +1,639 @@
+package types
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ "github.com/onsi/ginkgo/v2/formatter"
+)
+
+type GinkgoError struct {
+ Heading string
+ Message string
+ DocLink string
+ CodeLocation CodeLocation
+}
+
+func (g GinkgoError) Error() string {
+ out := formatter.F("{{bold}}{{red}}%s{{/}}\n", g.Heading)
+ if (g.CodeLocation != CodeLocation{}) {
+ contentsOfLine := strings.TrimLeft(g.CodeLocation.ContentsOfLine(), "\t ")
+ if contentsOfLine != "" {
+ out += formatter.F("{{light-gray}}%s{{/}}\n", contentsOfLine)
+ }
+ out += formatter.F("{{gray}}%s{{/}}\n", g.CodeLocation)
+ }
+ if g.Message != "" {
+ out += formatter.Fiw(1, formatter.COLS, g.Message)
+ out += "\n\n"
+ }
+ if g.DocLink != "" {
+ out += formatter.Fiw(1, formatter.COLS, "{{bold}}Learn more at:{{/}} {{cyan}}{{underline}}http://onsi.github.io/ginkgo/#%s{{/}}\n", g.DocLink)
+ }
+
+ return out
+}
+
+type ginkgoErrors struct{}
+
+var GinkgoErrors = ginkgoErrors{}
+
+func (g ginkgoErrors) UncaughtGinkgoPanic(cl CodeLocation) error {
+ return GinkgoError{
+ Heading: "Your Test Panicked",
+ Message: `When you, or your assertion library, calls Ginkgo's Fail(),
+Ginkgo panics to prevent subsequent assertions from running.
+
+Normally Ginkgo rescues this panic so you shouldn't see it.
+
+However, if you make an assertion in a goroutine, Ginkgo can't capture the panic.
+To circumvent this, you should call
+
+ defer GinkgoRecover()
+
+at the top of the goroutine that caused this panic.
+
+Alternatively, you may have made an assertion outside of a Ginkgo
+leaf node (e.g. in a container node or some out-of-band function) - please move your assertion to
+an appropriate Ginkgo node (e.g. a BeforeSuite, BeforeEach, It, etc...).`,
+ DocLink: "mental-model-how-ginkgo-handles-failure",
+ CodeLocation: cl,
+ }
+}
+
+func (g ginkgoErrors) RerunningSuite() error {
+ return GinkgoError{
+ Heading: "Rerunning Suite",
+ Message: formatter.F(`It looks like you are calling RunSpecs more than once. Ginkgo does not support rerunning suites. If you want to rerun a suite try {{bold}}ginkgo --repeat=N{{/}} or {{bold}}ginkgo --until-it-fails{{/}}`),
+ DocLink: "repeating-spec-runs-and-managing-flaky-specs",
+ }
+}
+
+/* Tree construction errors */
+
+func (g ginkgoErrors) PushingNodeInRunPhase(nodeType NodeType, cl CodeLocation) error {
+ return GinkgoError{
+ Heading: "Ginkgo detected an issue with your spec structure",
+ Message: formatter.F(
+ `It looks like you are trying to add a {{bold}}[%s]{{/}} node
+to the Ginkgo spec tree in a leaf node {{bold}}after{{/}} the specs started running.
+
+To enable randomization and parallelization Ginkgo requires the spec tree
+to be fully constructed up front. In practice, this means that you can
+only create nodes like {{bold}}[%s]{{/}} at the top-level or within the
+body of a {{bold}}Describe{{/}}, {{bold}}Context{{/}}, or {{bold}}When{{/}}.`, nodeType, nodeType),
+ CodeLocation: cl,
+ DocLink: "mental-model-how-ginkgo-traverses-the-spec-hierarchy",
+ }
+}
+
+func (g ginkgoErrors) CaughtPanicDuringABuildPhase(caughtPanic interface{}, cl CodeLocation) error {
+ return GinkgoError{
+ Heading: "Assertion or Panic detected during tree construction",
+ Message: formatter.F(
+ `Ginkgo detected a panic while constructing the spec tree.
+You may be trying to make an assertion in the body of a container node
+(i.e. {{bold}}Describe{{/}}, {{bold}}Context{{/}}, or {{bold}}When{{/}}).
+
+Please ensure all assertions are inside leaf nodes such as {{bold}}BeforeEach{{/}},
+{{bold}}It{{/}}, etc.
+
+{{bold}}Here's the content of the panic that was caught:{{/}}
+%v`, caughtPanic),
+ CodeLocation: cl,
+ DocLink: "no-assertions-in-container-nodes",
+ }
+}
+
+func (g ginkgoErrors) SuiteNodeInNestedContext(nodeType NodeType, cl CodeLocation) error {
+ docLink := "suite-setup-and-cleanup-beforesuite-and-aftersuite"
+ if nodeType.Is(NodeTypeReportBeforeSuite | NodeTypeReportAfterSuite) {
+ docLink = "reporting-nodes---reportbeforesuite-and-reportaftersuite"
+ }
+
+ return GinkgoError{
+ Heading: "Ginkgo detected an issue with your spec structure",
+ Message: formatter.F(
+ `It looks like you are trying to add a {{bold}}[%s]{{/}} node within a container node.
+
+{{bold}}%s{{/}} can only be called at the top level.`, nodeType, nodeType),
+ CodeLocation: cl,
+ DocLink: docLink,
+ }
+}
+
+func (g ginkgoErrors) SuiteNodeDuringRunPhase(nodeType NodeType, cl CodeLocation) error {
+ docLink := "suite-setup-and-cleanup-beforesuite-and-aftersuite"
+ if nodeType.Is(NodeTypeReportBeforeSuite | NodeTypeReportAfterSuite) {
+ docLink = "reporting-nodes---reportbeforesuite-and-reportaftersuite"
+ }
+
+ return GinkgoError{
+ Heading: "Ginkgo detected an issue with your spec structure",
+ Message: formatter.F(
+ `It looks like you are trying to add a {{bold}}[%s]{{/}} node within a leaf node after the spec started running.
+
+{{bold}}%s{{/}} can only be called at the top level.`, nodeType, nodeType),
+ CodeLocation: cl,
+ DocLink: docLink,
+ }
+}
+
+func (g ginkgoErrors) MultipleBeforeSuiteNodes(nodeType NodeType, cl CodeLocation, earlierNodeType NodeType, earlierCodeLocation CodeLocation) error {
+ return ginkgoErrorMultipleSuiteNodes("setup", nodeType, cl, earlierNodeType, earlierCodeLocation)
+}
+
+func (g ginkgoErrors) MultipleAfterSuiteNodes(nodeType NodeType, cl CodeLocation, earlierNodeType NodeType, earlierCodeLocation CodeLocation) error {
+ return ginkgoErrorMultipleSuiteNodes("teardown", nodeType, cl, earlierNodeType, earlierCodeLocation)
+}
+
+func ginkgoErrorMultipleSuiteNodes(setupOrTeardown string, nodeType NodeType, cl CodeLocation, earlierNodeType NodeType, earlierCodeLocation CodeLocation) error {
+ return GinkgoError{
+ Heading: "Ginkgo detected an issue with your spec structure",
+ Message: formatter.F(
+ `It looks like you are trying to add a {{bold}}[%s]{{/}} node but
+you already have a {{bold}}[%s]{{/}} node defined at: {{gray}}%s{{/}}.
+
+Ginkgo only allows you to define one suite %s node.`, nodeType, earlierNodeType, earlierCodeLocation, setupOrTeardown),
+ CodeLocation: cl,
+ DocLink: "suite-setup-and-cleanup-beforesuite-and-aftersuite",
+ }
+}
+
+/* Decorator errors */
+func (g ginkgoErrors) InvalidDecoratorForNodeType(cl CodeLocation, nodeType NodeType, decorator string) error {
+ return GinkgoError{
+ Heading: "Invalid Decorator",
+ Message: formatter.F(`[%s] node cannot be passed a(n) '%s' decorator`, nodeType, decorator),
+ CodeLocation: cl,
+ DocLink: "node-decorators-overview",
+ }
+}
+
+func (g ginkgoErrors) InvalidDeclarationOfFocusedAndPending(cl CodeLocation, nodeType NodeType) error {
+ return GinkgoError{
+ Heading: "Invalid Combination of Decorators: Focused and Pending",
+ Message: formatter.F(`[%s] node was decorated with both Focus and Pending. At most one is allowed.`, nodeType),
+ CodeLocation: cl,
+ DocLink: "node-decorators-overview",
+ }
+}
+
+func (g ginkgoErrors) InvalidDeclarationOfFlakeAttemptsAndMustPassRepeatedly(cl CodeLocation, nodeType NodeType) error {
+ return GinkgoError{
+ Heading: "Invalid Combination of Decorators: FlakeAttempts and MustPassRepeatedly",
+ Message: formatter.F(`[%s] node was decorated with both FlakeAttempts and MustPassRepeatedly. At most one is allowed.`, nodeType),
+ CodeLocation: cl,
+ DocLink: "node-decorators-overview",
+ }
+}
+
+func (g ginkgoErrors) UnknownDecorator(cl CodeLocation, nodeType NodeType, decorator interface{}) error {
+ return GinkgoError{
+ Heading: "Unknown Decorator",
+ Message: formatter.F(`[%s] node was passed an unknown decorator: '%#v'`, nodeType, decorator),
+ CodeLocation: cl,
+ DocLink: "node-decorators-overview",
+ }
+}
+
+func (g ginkgoErrors) InvalidBodyTypeForContainer(t reflect.Type, cl CodeLocation, nodeType NodeType) error {
+ return GinkgoError{
+ Heading: "Invalid Function",
+ Message: formatter.F(`[%s] node must be passed {{bold}}func(){{/}} - i.e. functions that take nothing and return nothing. You passed {{bold}}%s{{/}} instead.`, nodeType, t),
+ CodeLocation: cl,
+ DocLink: "node-decorators-overview",
+ }
+}
+
+func (g ginkgoErrors) InvalidBodyType(t reflect.Type, cl CodeLocation, nodeType NodeType) error {
+ mustGet := "{{bold}}func(){{/}}, {{bold}}func(ctx SpecContext){{/}}, or {{bold}}func(ctx context.Context){{/}}"
+ if nodeType.Is(NodeTypeContainer) {
+ mustGet = "{{bold}}func(){{/}}"
+ }
+ return GinkgoError{
+ Heading: "Invalid Function",
+ Message: formatter.F(`[%s] node must be passed `+mustGet+`.
+You passed {{bold}}%s{{/}} instead.`, nodeType, t),
+ CodeLocation: cl,
+ DocLink: "node-decorators-overview",
+ }
+}
+
+func (g ginkgoErrors) InvalidBodyTypeForSynchronizedBeforeSuiteProc1(t reflect.Type, cl CodeLocation) error {
+ mustGet := "{{bold}}func() []byte{{/}}, {{bold}}func(ctx SpecContext) []byte{{/}}, or {{bold}}func(ctx context.Context) []byte{{/}}, {{bold}}func(){{/}}, {{bold}}func(ctx SpecContext){{/}}, or {{bold}}func(ctx context.Context){{/}}"
+ return GinkgoError{
+ Heading: "Invalid Function",
+ Message: formatter.F(`[SynchronizedBeforeSuite] node must be passed `+mustGet+` for its first function.
+You passed {{bold}}%s{{/}} instead.`, t),
+ CodeLocation: cl,
+ DocLink: "node-decorators-overview",
+ }
+}
+
+func (g ginkgoErrors) InvalidBodyTypeForSynchronizedBeforeSuiteAllProcs(t reflect.Type, cl CodeLocation) error {
+ mustGet := "{{bold}}func(){{/}}, {{bold}}func(ctx SpecContext){{/}}, or {{bold}}func(ctx context.Context){{/}}, {{bold}}func([]byte){{/}}, {{bold}}func(ctx SpecContext, []byte){{/}}, or {{bold}}func(ctx context.Context, []byte){{/}}"
+ return GinkgoError{
+ Heading: "Invalid Function",
+ Message: formatter.F(`[SynchronizedBeforeSuite] node must be passed `+mustGet+` for its second function.
+You passed {{bold}}%s{{/}} instead.`, t),
+ CodeLocation: cl,
+ DocLink: "node-decorators-overview",
+ }
+}
+
+func (g ginkgoErrors) MultipleBodyFunctions(cl CodeLocation, nodeType NodeType) error {
+ return GinkgoError{
+ Heading: "Multiple Functions",
+ Message: formatter.F(`[%s] node must be passed a single function - but more than one was passed in.`, nodeType),
+ CodeLocation: cl,
+ DocLink: "node-decorators-overview",
+ }
+}
+
+func (g ginkgoErrors) MissingBodyFunction(cl CodeLocation, nodeType NodeType) error {
+ return GinkgoError{
+ Heading: "Missing Functions",
+ Message: formatter.F(`[%s] node must be passed a single function - but none was passed in.`, nodeType),
+ CodeLocation: cl,
+ DocLink: "node-decorators-overview",
+ }
+}
+
+func (g ginkgoErrors) InvalidTimeoutOrGracePeriodForNonContextNode(cl CodeLocation, nodeType NodeType) error {
+ return GinkgoError{
+ Heading: "Invalid NodeTimeout SpecTimeout, or GracePeriod",
+ Message: formatter.F(`[%s] was passed NodeTimeout, SpecTimeout, or GracePeriod but does not have a callback that accepts a {{bold}}SpecContext{{/}} or {{bold}}context.Context{{/}}. You must accept a context to enable timeouts and grace periods`, nodeType),
+ CodeLocation: cl,
+ DocLink: "spec-timeouts-and-interruptible-nodes",
+ }
+}
+
+func (g ginkgoErrors) InvalidTimeoutOrGracePeriodForNonContextCleanupNode(cl CodeLocation) error {
+ return GinkgoError{
+ Heading: "Invalid NodeTimeout SpecTimeout, or GracePeriod",
+ Message: formatter.F(`[DeferCleanup] was passed NodeTimeout or GracePeriod but does not have a callback that accepts a {{bold}}SpecContext{{/}} or {{bold}}context.Context{{/}}. You must accept a context to enable timeouts and grace periods`),
+ CodeLocation: cl,
+ DocLink: "spec-timeouts-and-interruptible-nodes",
+ }
+}
+
+/* Ordered Container errors */
+func (g ginkgoErrors) InvalidSerialNodeInNonSerialOrderedContainer(cl CodeLocation, nodeType NodeType) error {
+ return GinkgoError{
+ Heading: "Invalid Serial Node in Non-Serial Ordered Container",
+ Message: formatter.F(`[%s] node was decorated with Serial but occurs in an Ordered container that is not marked Serial. Move the Serial decorator to the outer-most Ordered container to mark all ordered specs within the container as serial.`, nodeType),
+ CodeLocation: cl,
+ DocLink: "node-decorators-overview",
+ }
+}
+
+func (g ginkgoErrors) SetupNodeNotInOrderedContainer(cl CodeLocation, nodeType NodeType) error {
+ return GinkgoError{
+ Heading: "Setup Node not in Ordered Container",
+ Message: fmt.Sprintf("[%s] setup nodes must appear inside an Ordered container. They cannot be nested within other containers, even containers in an ordered container.", nodeType),
+ CodeLocation: cl,
+ DocLink: "ordered-containers",
+ }
+}
+
+func (g ginkgoErrors) InvalidContinueOnFailureDecoration(cl CodeLocation) error {
+ return GinkgoError{
+ Heading: "ContinueOnFailure not decorating an outermost Ordered Container",
+ Message: "ContinueOnFailure can only decorate an Ordered container, and this Ordered container must be the outermost Ordered container.",
+ CodeLocation: cl,
+ DocLink: "ordered-containers",
+ }
+}
+
+/* DeferCleanup errors */
+func (g ginkgoErrors) DeferCleanupInvalidFunction(cl CodeLocation) error {
+ return GinkgoError{
+ Heading: "DeferCleanup requires a valid function",
+ Message: "You must pass DeferCleanup a function to invoke. This function must return zero or one values - if it does return, it must return an error. The function can take arbitrarily many arguments and you should provide these to DeferCleanup to pass along to the function.",
+ CodeLocation: cl,
+ DocLink: "cleaning-up-our-cleanup-code-defercleanup",
+ }
+}
+
+func (g ginkgoErrors) PushingCleanupNodeDuringTreeConstruction(cl CodeLocation) error {
+ return GinkgoError{
+ Heading: "DeferCleanup must be called inside a setup or subject node",
+ Message: "You must call DeferCleanup inside a setup node (e.g. BeforeEach, BeforeSuite, AfterAll...) or a subject node (i.e. It). You can't call DeferCleanup at the top-level or in a container node - use the After* family of setup nodes instead.",
+ CodeLocation: cl,
+ DocLink: "cleaning-up-our-cleanup-code-defercleanup",
+ }
+}
+
+func (g ginkgoErrors) PushingCleanupInReportingNode(cl CodeLocation, nodeType NodeType) error {
+ return GinkgoError{
+ Heading: fmt.Sprintf("DeferCleanup cannot be called in %s", nodeType),
+ Message: "Please inline your cleanup code - Ginkgo won't run cleanup code after a Reporting node.",
+ CodeLocation: cl,
+ DocLink: "cleaning-up-our-cleanup-code-defercleanup",
+ }
+}
+
+func (g ginkgoErrors) PushingCleanupInCleanupNode(cl CodeLocation) error {
+ return GinkgoError{
+ Heading: "DeferCleanup cannot be called in a DeferCleanup callback",
+ Message: "Please inline your cleanup code - Ginkgo doesn't let you call DeferCleanup from within DeferCleanup",
+ CodeLocation: cl,
+ DocLink: "cleaning-up-our-cleanup-code-defercleanup",
+ }
+}
+
+/* ReportEntry errors */
+func (g ginkgoErrors) TooManyReportEntryValues(cl CodeLocation, arg interface{}) error {
+ return GinkgoError{
+ Heading: "Too Many ReportEntry Values",
+ Message: formatter.F(`{{bold}}AddReportEntry{{/}} can only be given one value. Got unexpected value: %#v`, arg),
+ CodeLocation: cl,
+ DocLink: "attaching-data-to-reports",
+ }
+}
+
+func (g ginkgoErrors) AddReportEntryNotDuringRunPhase(cl CodeLocation) error {
+ return GinkgoError{
+ Heading: "Ginkgo detected an issue with your spec structure",
+ Message: formatter.F(`It looks like you are calling {{bold}}AddReportEntry{{/}} outside of a running spec. Make sure you call {{bold}}AddReportEntry{{/}} inside a runnable node such as It or BeforeEach and not inside the body of a container such as Describe or Context.`),
+ CodeLocation: cl,
+ DocLink: "attaching-data-to-reports",
+ }
+}
+
+/* By errors */
+func (g ginkgoErrors) ByNotDuringRunPhase(cl CodeLocation) error {
+ return GinkgoError{
+ Heading: "Ginkgo detected an issue with your spec structure",
+ Message: formatter.F(`It looks like you are calling {{bold}}By{{/}} outside of a running spec. Make sure you call {{bold}}By{{/}} inside a runnable node such as It or BeforeEach and not inside the body of a container such as Describe or Context.`),
+ CodeLocation: cl,
+ DocLink: "documenting-complex-specs-by",
+ }
+}
+
+/* FileFilter and SkipFilter errors */
+func (g ginkgoErrors) InvalidFileFilter(filter string) error {
+ return GinkgoError{
+ Heading: "Invalid File Filter",
+ Message: fmt.Sprintf(`The provided file filter: "%s" is invalid. File filters must have the format "file" or "file:lines", where "file" is a regular expression that will match against the file path and "lines" is a comma-separated list of integers (e.g. file:1,5,7), line-ranges (e.g. file:1-3,5-9), or both (e.g. file:1,5-9)`, filter),
+ DocLink: "filtering-specs",
+ }
+}
+
+func (g ginkgoErrors) InvalidFileFilterRegularExpression(filter string, err error) error {
+ return GinkgoError{
+ Heading: "Invalid File Filter Regular Expression",
+ Message: fmt.Sprintf(`The provided file filter: "%s" included an invalid regular expression. regexp.Compile error: %s`, filter, err),
+ DocLink: "filtering-specs",
+ }
+}
+
+/* Label Errors */
+func (g ginkgoErrors) SyntaxErrorParsingLabelFilter(input string, location int, error string) error {
+ var message string
+ if location >= 0 {
+ for i, r := range input {
+ if i == location {
+ message += "{{red}}{{bold}}{{underline}}"
+ }
+ message += string(r)
+ if i == location {
+ message += "{{/}}"
+ }
+ }
+ } else {
+ message = input
+ }
+ message += "\n" + error
+ return GinkgoError{
+ Heading: "Syntax Error Parsing Label Filter",
+ Message: message,
+ DocLink: "spec-labels",
+ }
+}
+
+func (g ginkgoErrors) InvalidLabel(label string, cl CodeLocation) error {
+ return GinkgoError{
+ Heading: "Invalid Label",
+ Message: fmt.Sprintf("'%s' is an invalid label. Labels cannot contain of the following characters: '&|!,()/'", label),
+ CodeLocation: cl,
+ DocLink: "spec-labels",
+ }
+}
+
+func (g ginkgoErrors) InvalidEmptyLabel(cl CodeLocation) error {
+ return GinkgoError{
+ Heading: "Invalid Empty Label",
+ Message: "Labels cannot be empty",
+ CodeLocation: cl,
+ DocLink: "spec-labels",
+ }
+}
+
+/* Table errors */
+func (g ginkgoErrors) MultipleEntryBodyFunctionsForTable(cl CodeLocation) error {
+ return GinkgoError{
+ Heading: "DescribeTable passed multiple functions",
+ Message: "It looks like you are passing multiple functions into DescribeTable. Only one function can be passed in. This function will be called for each Entry in the table.",
+ CodeLocation: cl,
+ DocLink: "table-specs",
+ }
+}
+
+func (g ginkgoErrors) InvalidEntryDescription(cl CodeLocation) error {
+ return GinkgoError{
+ Heading: "Invalid Entry description",
+ Message: "Entry description functions must be a string, a function that accepts the entry parameters and returns a string, or nil.",
+ CodeLocation: cl,
+ DocLink: "table-specs",
+ }
+}
+
+func (g ginkgoErrors) MissingParametersForTableFunction(cl CodeLocation) error {
+ return GinkgoError{
+ Heading: "No parameters have been passed to the Table Function",
+ Message: "The Table Function expected at least 1 parameter",
+ CodeLocation: cl,
+ DocLink: "table-specs",
+ }
+}
+
+func (g ginkgoErrors) IncorrectParameterTypeForTable(i int, name string, cl CodeLocation) error {
+ return GinkgoError{
+ Heading: "DescribeTable passed incorrect parameter type",
+ Message: fmt.Sprintf("Parameter #%d passed to DescribeTable is of incorrect type <%s>", i, name),
+ CodeLocation: cl,
+ DocLink: "table-specs",
+ }
+}
+
+func (g ginkgoErrors) TooFewParametersToTableFunction(expected, actual int, kind string, cl CodeLocation) error {
+ return GinkgoError{
+ Heading: fmt.Sprintf("Too few parameters passed in to %s", kind),
+ Message: fmt.Sprintf("The %s expected %d parameters but you passed in %d", kind, expected, actual),
+ CodeLocation: cl,
+ DocLink: "table-specs",
+ }
+}
+
+func (g ginkgoErrors) TooManyParametersToTableFunction(expected, actual int, kind string, cl CodeLocation) error {
+ return GinkgoError{
+ Heading: fmt.Sprintf("Too many parameters passed in to %s", kind),
+ Message: fmt.Sprintf("The %s expected %d parameters but you passed in %d", kind, expected, actual),
+ CodeLocation: cl,
+ DocLink: "table-specs",
+ }
+}
+
+func (g ginkgoErrors) IncorrectParameterTypeToTableFunction(i int, expected, actual reflect.Type, kind string, cl CodeLocation) error {
+ return GinkgoError{
+ Heading: fmt.Sprintf("Incorrect parameters type passed to %s", kind),
+ Message: fmt.Sprintf("The %s expected parameter #%d to be of type <%s> but you passed in <%s>", kind, i, expected, actual),
+ CodeLocation: cl,
+ DocLink: "table-specs",
+ }
+}
+
+func (g ginkgoErrors) IncorrectVariadicParameterTypeToTableFunction(expected, actual reflect.Type, kind string, cl CodeLocation) error {
+ return GinkgoError{
+ Heading: fmt.Sprintf("Incorrect parameters type passed to %s", kind),
+ Message: fmt.Sprintf("The %s expected its variadic parameters to be of type <%s> but you passed in <%s>", kind, expected, actual),
+ CodeLocation: cl,
+ DocLink: "table-specs",
+ }
+}
+
+func (g ginkgoErrors) ContextsCannotBeUsedInSubtreeTables(cl CodeLocation) error {
+ return GinkgoError{
+ Heading: "Contexts cannot be used in subtree tables",
+ Message: "You''ve defined a subtree body function that accepts a context but did not provide one in the table entry. Ginkgo SpecContexts can only be passed in to subject and setup nodes - so if you are trying to implement a spec timeout you should request a context in the It function within your subtree body function, not in the subtree body function itself.",
+ CodeLocation: cl,
+ DocLink: "table-specs",
+ }
+}
+
+/* Parallel Synchronization errors */
+
+func (g ginkgoErrors) AggregatedReportUnavailableDueToNodeDisappearing() error {
+ return GinkgoError{
+ Heading: "Test Report unavailable because a Ginkgo parallel process disappeared",
+ Message: "The aggregated report could not be fetched for a ReportAfterSuite node. A Ginkgo parallel process disappeared before it could finish reporting.",
+ }
+}
+
+func (g ginkgoErrors) SynchronizedBeforeSuiteFailedOnProc1() error {
+ return GinkgoError{
+ Heading: "SynchronizedBeforeSuite failed on Ginkgo parallel process #1",
+ Message: "The first SynchronizedBeforeSuite function running on Ginkgo parallel process #1 failed. This suite will now abort.",
+ }
+}
+
+func (g ginkgoErrors) SynchronizedBeforeSuiteDisappearedOnProc1() error {
+ return GinkgoError{
+ Heading: "Process #1 disappeared before SynchronizedBeforeSuite could report back",
+ Message: "Ginkgo parallel process #1 disappeared before the first SynchronizedBeforeSuite function completed. This suite will now abort.",
+ }
+}
+
+/* Configuration errors */
+
+func (g ginkgoErrors) UnknownTypePassedToRunSpecs(value interface{}) error {
+ return GinkgoError{
+ Heading: "Unknown Type passed to RunSpecs",
+ Message: fmt.Sprintf("RunSpecs() accepts labels, and configuration of type types.SuiteConfig and/or types.ReporterConfig.\n You passed in: %v", value),
+ }
+}
+
+var sharedParallelErrorMessage = "It looks like you are trying to run specs in parallel with go test.\nThis is unsupported and you should use the ginkgo CLI instead."
+
+func (g ginkgoErrors) InvalidParallelTotalConfiguration() error {
+ return GinkgoError{
+ Heading: "-ginkgo.parallel.total must be >= 1",
+ Message: sharedParallelErrorMessage,
+ DocLink: "spec-parallelization",
+ }
+}
+
+func (g ginkgoErrors) InvalidParallelProcessConfiguration() error {
+ return GinkgoError{
+ Heading: "-ginkgo.parallel.process is one-indexed and must be <= ginkgo.parallel.total",
+ Message: sharedParallelErrorMessage,
+ DocLink: "spec-parallelization",
+ }
+}
+
+func (g ginkgoErrors) MissingParallelHostConfiguration() error {
+ return GinkgoError{
+ Heading: "-ginkgo.parallel.host is missing",
+ Message: sharedParallelErrorMessage,
+ DocLink: "spec-parallelization",
+ }
+}
+
+func (g ginkgoErrors) UnreachableParallelHost(host string) error {
+ return GinkgoError{
+ Heading: "Could not reach ginkgo.parallel.host:" + host,
+ Message: sharedParallelErrorMessage,
+ DocLink: "spec-parallelization",
+ }
+}
+
+func (g ginkgoErrors) DryRunInParallelConfiguration() error {
+ return GinkgoError{
+ Heading: "Ginkgo only performs -dryRun in serial mode.",
+ Message: "Please try running ginkgo -dryRun again, but without -p or -procs to ensure the suite is running in series.",
+ }
+}
+
+func (g ginkgoErrors) GracePeriodCannotBeZero() error {
+ return GinkgoError{
+ Heading: "Ginkgo requires a positive --grace-period.",
+ Message: "Please set --grace-period to a positive duration. The default is 30s.",
+ }
+}
+
+func (g ginkgoErrors) ConflictingVerbosityConfiguration() error {
+ return GinkgoError{
+ Heading: "Conflicting reporter verbosity settings.",
+ Message: "You can't set more than one of -v, -vv and --succinct. Please pick one!",
+ }
+}
+
+func (g ginkgoErrors) InvalidOutputInterceptorModeConfiguration(value string) error {
+ return GinkgoError{
+ Heading: fmt.Sprintf("Invalid value '%s' for --output-interceptor-mode.", value),
+ Message: "You must choose one of 'dup', 'swap', or 'none'.",
+ }
+}
+
+func (g ginkgoErrors) InvalidGoFlagCount() error {
+ return GinkgoError{
+ Heading: "Use of go test -count",
+ Message: "Ginkgo does not support using go test -count to rerun suites. Only -count=1 is allowed. To repeat suite runs, please use the ginkgo cli and `ginkgo -until-it-fails` or `ginkgo -repeat=N`.",
+ }
+}
+
+func (g ginkgoErrors) InvalidGoFlagParallel() error {
+ return GinkgoError{
+ Heading: "Use of go test -parallel",
+ Message: "Go test's implementation of parallelization does not actually parallelize Ginkgo specs. Please use the ginkgo cli and `ginkgo -p` or `ginkgo -procs=N` instead.",
+ }
+}
+
+func (g ginkgoErrors) BothRepeatAndUntilItFails() error {
+ return GinkgoError{
+ Heading: "--repeat and --until-it-fails are both set",
+ Message: "--until-it-fails directs Ginkgo to rerun specs indefinitely until they fail. --repeat directs Ginkgo to rerun specs a set number of times. You can't set both... which would you like?",
+ }
+}
+
+/* Stack-Trace parsing errors */
+
+func (g ginkgoErrors) FailedToParseStackTrace(message string) error {
+ return GinkgoError{
+ Heading: "Failed to Parse Stack Trace",
+ Message: message,
+ }
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/file_filter.go b/vendor/github.com/onsi/ginkgo/v2/types/file_filter.go
new file mode 100644
index 000000000..cc21df71e
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/types/file_filter.go
@@ -0,0 +1,106 @@
+package types
+
+import (
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+func ParseFileFilters(filters []string) (FileFilters, error) {
+ ffs := FileFilters{}
+ for _, filter := range filters {
+ ff := FileFilter{}
+ if filter == "" {
+ return nil, GinkgoErrors.InvalidFileFilter(filter)
+ }
+ components := strings.Split(filter, ":")
+ if !(len(components) == 1 || len(components) == 2) {
+ return nil, GinkgoErrors.InvalidFileFilter(filter)
+ }
+
+ var err error
+ ff.Filename, err = regexp.Compile(components[0])
+ if err != nil {
+ return nil, err
+ }
+ if len(components) == 2 {
+ lineFilters := strings.Split(components[1], ",")
+ for _, lineFilter := range lineFilters {
+ components := strings.Split(lineFilter, "-")
+ if len(components) == 1 {
+ line, err := strconv.Atoi(strings.TrimSpace(components[0]))
+ if err != nil {
+ return nil, GinkgoErrors.InvalidFileFilter(filter)
+ }
+ ff.LineFilters = append(ff.LineFilters, LineFilter{line, line + 1})
+ } else if len(components) == 2 {
+ line1, err := strconv.Atoi(strings.TrimSpace(components[0]))
+ if err != nil {
+ return nil, GinkgoErrors.InvalidFileFilter(filter)
+ }
+ line2, err := strconv.Atoi(strings.TrimSpace(components[1]))
+ if err != nil {
+ return nil, GinkgoErrors.InvalidFileFilter(filter)
+ }
+ ff.LineFilters = append(ff.LineFilters, LineFilter{line1, line2})
+ } else {
+ return nil, GinkgoErrors.InvalidFileFilter(filter)
+ }
+ }
+ }
+ ffs = append(ffs, ff)
+ }
+ return ffs, nil
+}
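+
+// An illustrative sketch of the accepted filter format (not exercised in this
+// file). Note that a bare line N is stored as the half-open range [N, N+1) and
+// a range A-B as [A, B), so "5-9" matches lines 5 through 8:
+//
+//	ffs, err := ParseFileFilters([]string{`_test\.go:1,5-9`})
+//	if err != nil {
+//		panic(err) // handle appropriately
+//	}
+//	match := ffs.Matches([]CodeLocation{{FileName: "suite_test.go", LineNumber: 7}})
+//	// match == true: the filename matches the regexp and 7 falls in [5, 9)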
+
+type FileFilter struct {
+ Filename *regexp.Regexp
+ LineFilters LineFilters
+}
+
+func (f FileFilter) Matches(locations []CodeLocation) bool {
+ for _, location := range locations {
+ if f.Filename.MatchString(location.FileName) &&
+ f.LineFilters.Matches(location.LineNumber) {
+ return true
+ }
+ }
+ return false
+}
+
+type FileFilters []FileFilter
+
+func (ffs FileFilters) Matches(locations []CodeLocation) bool {
+ for _, ff := range ffs {
+ if ff.Matches(locations) {
+ return true
+ }
+ }
+
+ return false
+}
+
+type LineFilter struct {
+ Min int
+ Max int
+}
+
+func (lf LineFilter) Matches(line int) bool {
+ return lf.Min <= line && line < lf.Max
+}
+
+type LineFilters []LineFilter
+
+func (lfs LineFilters) Matches(line int) bool {
+ if len(lfs) == 0 {
+ return true
+ }
+
+ for _, lf := range lfs {
+ if lf.Matches(line) {
+ return true
+ }
+ }
+ return false
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/flags.go b/vendor/github.com/onsi/ginkgo/v2/types/flags.go
new file mode 100644
index 000000000..de69f3022
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/types/flags.go
@@ -0,0 +1,490 @@
+package types
+
+import (
+ "flag"
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+ "time"
+
+ "github.com/onsi/ginkgo/v2/formatter"
+)
+
+type GinkgoFlag struct {
+ Name string
+ KeyPath string
+ SectionKey string
+
+ Usage string
+ UsageArgument string
+ UsageDefaultValue string
+
+ DeprecatedName string
+ DeprecatedDocLink string
+ DeprecatedVersion string
+
+ ExportAs string
+ AlwaysExport bool
+}
+
+type GinkgoFlags []GinkgoFlag
+
+func (f GinkgoFlags) CopyAppend(flags ...GinkgoFlag) GinkgoFlags {
+ out := GinkgoFlags{}
+ out = append(out, f...)
+ out = append(out, flags...)
+ return out
+}
+
+func (f GinkgoFlags) WithPrefix(prefix string) GinkgoFlags {
+ if prefix == "" {
+ return f
+ }
+ out := GinkgoFlags{}
+ for _, flag := range f {
+ if flag.Name != "" {
+ flag.Name = prefix + "." + flag.Name
+ }
+ if flag.DeprecatedName != "" {
+ flag.DeprecatedName = prefix + "." + flag.DeprecatedName
+ }
+ if flag.ExportAs != "" {
+ flag.ExportAs = prefix + "." + flag.ExportAs
+ }
+ out = append(out, flag)
+ }
+ return out
+}
+
+func (f GinkgoFlags) SubsetWithNames(names ...string) GinkgoFlags {
+ out := GinkgoFlags{}
+ for _, flag := range f {
+ for _, name := range names {
+ if flag.Name == name {
+ out = append(out, flag)
+ break
+ }
+ }
+ }
+ return out
+}
+
+type GinkgoFlagSection struct {
+ Key string
+ Style string
+ Succinct bool
+ Heading string
+ Description string
+}
+
+type GinkgoFlagSections []GinkgoFlagSection
+
+func (gfs GinkgoFlagSections) Lookup(key string) (GinkgoFlagSection, bool) {
+ for _, section := range gfs {
+ if section.Key == key {
+ return section, true
+ }
+ }
+
+ return GinkgoFlagSection{}, false
+}
+
+type GinkgoFlagSet struct {
+ flags GinkgoFlags
+ bindings interface{}
+
+ sections GinkgoFlagSections
+ extraGoFlagsSection GinkgoFlagSection
+
+ flagSet *flag.FlagSet
+}
+
+// Call NewGinkgoFlagSet to create a GinkgoFlagSet that creates and binds to its own *flag.FlagSet
+func NewGinkgoFlagSet(flags GinkgoFlags, bindings interface{}, sections GinkgoFlagSections) (GinkgoFlagSet, error) {
+ return bindFlagSet(GinkgoFlagSet{
+ flags: flags,
+ bindings: bindings,
+ sections: sections,
+ }, nil)
+}
+
+// Call NewAttachedGinkgoFlagSet to create a GinkgoFlagSet that extends an existing *flag.FlagSet
+func NewAttachedGinkgoFlagSet(flagSet *flag.FlagSet, flags GinkgoFlags, bindings interface{}, sections GinkgoFlagSections, extraGoFlagsSection GinkgoFlagSection) (GinkgoFlagSet, error) {
+ return bindFlagSet(GinkgoFlagSet{
+ flags: flags,
+ bindings: bindings,
+ sections: sections,
+ extraGoFlagsSection: extraGoFlagsSection,
+ }, flagSet)
+}
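+
+// A minimal sketch of wiring a flag to a binding via KeyPath (the config type
+// here is hypothetical, not part of Ginkgo):
+//
+//	type config struct{ Verbose bool }
+//	c := &config{}
+//	fs, err := NewGinkgoFlagSet(
+//		GinkgoFlags{{Name: "verbose", KeyPath: "Verbose", Usage: "emit more output"}},
+//		c,
+//		GinkgoFlagSections{},
+//	)
+//	if err != nil {
+//		panic(err)
+//	}
+//	remaining, _ := fs.Parse([]string{"--verbose", "some-suite"})
+//	// c.Verbose == true, remaining == []string{"some-suite"}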
+
+func bindFlagSet(f GinkgoFlagSet, flagSet *flag.FlagSet) (GinkgoFlagSet, error) {
+ if flagSet == nil {
+ f.flagSet = flag.NewFlagSet("", flag.ContinueOnError)
+ //suppress all output as Ginkgo is responsible for formatting usage
+ f.flagSet.SetOutput(io.Discard)
+ } else {
+ f.flagSet = flagSet
+ //we're piggybacking on an existing flagset (typically go test) so we have limited control
+ //over user feedback
+ f.flagSet.Usage = f.substituteUsage
+ }
+
+ for _, flag := range f.flags {
+ name := flag.Name
+
+ deprecatedUsage := "[DEPRECATED]"
+ deprecatedName := flag.DeprecatedName
+ if name != "" {
+ deprecatedUsage = fmt.Sprintf("[DEPRECATED] use --%s instead", name)
+ } else if flag.Usage != "" {
+ deprecatedUsage += " " + flag.Usage
+ }
+
+ value, ok := valueAtKeyPath(f.bindings, flag.KeyPath)
+ if !ok {
+ return GinkgoFlagSet{}, fmt.Errorf("could not load KeyPath: %s", flag.KeyPath)
+ }
+
+ iface, addr := value.Interface(), value.Addr().Interface()
+
+ switch value.Type() {
+ case reflect.TypeOf(string("")):
+ if name != "" {
+ f.flagSet.StringVar(addr.(*string), name, iface.(string), flag.Usage)
+ }
+ if deprecatedName != "" {
+ f.flagSet.StringVar(addr.(*string), deprecatedName, iface.(string), deprecatedUsage)
+ }
+ case reflect.TypeOf(int64(0)):
+ if name != "" {
+ f.flagSet.Int64Var(addr.(*int64), name, iface.(int64), flag.Usage)
+ }
+ if deprecatedName != "" {
+ f.flagSet.Int64Var(addr.(*int64), deprecatedName, iface.(int64), deprecatedUsage)
+ }
+ case reflect.TypeOf(float64(0)):
+ if name != "" {
+ f.flagSet.Float64Var(addr.(*float64), name, iface.(float64), flag.Usage)
+ }
+ if deprecatedName != "" {
+ f.flagSet.Float64Var(addr.(*float64), deprecatedName, iface.(float64), deprecatedUsage)
+ }
+ case reflect.TypeOf(int(0)):
+ if name != "" {
+ f.flagSet.IntVar(addr.(*int), name, iface.(int), flag.Usage)
+ }
+ if deprecatedName != "" {
+ f.flagSet.IntVar(addr.(*int), deprecatedName, iface.(int), deprecatedUsage)
+ }
+ case reflect.TypeOf(bool(true)):
+ if name != "" {
+ f.flagSet.BoolVar(addr.(*bool), name, iface.(bool), flag.Usage)
+ }
+ if deprecatedName != "" {
+ f.flagSet.BoolVar(addr.(*bool), deprecatedName, iface.(bool), deprecatedUsage)
+ }
+ case reflect.TypeOf(time.Duration(0)):
+ if name != "" {
+ f.flagSet.DurationVar(addr.(*time.Duration), name, iface.(time.Duration), flag.Usage)
+ }
+ if deprecatedName != "" {
+ f.flagSet.DurationVar(addr.(*time.Duration), deprecatedName, iface.(time.Duration), deprecatedUsage)
+ }
+
+ case reflect.TypeOf([]string{}):
+ if name != "" {
+ f.flagSet.Var(stringSliceVar{value}, name, flag.Usage)
+ }
+ if deprecatedName != "" {
+ f.flagSet.Var(stringSliceVar{value}, deprecatedName, deprecatedUsage)
+ }
+ default:
+ return GinkgoFlagSet{}, fmt.Errorf("unsupported type %T", iface)
+ }
+ }
+
+ return f, nil
+}
+
+func (f GinkgoFlagSet) IsZero() bool {
+ return f.flagSet == nil
+}
+
+func (f GinkgoFlagSet) WasSet(name string) bool {
+ found := false
+ f.flagSet.Visit(func(f *flag.Flag) {
+ if f.Name == name {
+ found = true
+ }
+ })
+
+ return found
+}
+
+func (f GinkgoFlagSet) Lookup(name string) *flag.Flag {
+ return f.flagSet.Lookup(name)
+}
+
+func (f GinkgoFlagSet) Parse(args []string) ([]string, error) {
+ if f.IsZero() {
+ return args, nil
+ }
+ err := f.flagSet.Parse(args)
+ if err != nil {
+ return []string{}, err
+ }
+ return f.flagSet.Args(), nil
+}
+
+func (f GinkgoFlagSet) ValidateDeprecations(deprecationTracker *DeprecationTracker) {
+ if f.IsZero() {
+ return
+ }
+ f.flagSet.Visit(func(flag *flag.Flag) {
+ for _, ginkgoFlag := range f.flags {
+ if ginkgoFlag.DeprecatedName != "" && strings.HasSuffix(flag.Name, ginkgoFlag.DeprecatedName) {
+ message := fmt.Sprintf("--%s is deprecated", ginkgoFlag.DeprecatedName)
+ if ginkgoFlag.Name != "" {
+ message = fmt.Sprintf("--%s is deprecated, use --%s instead", ginkgoFlag.DeprecatedName, ginkgoFlag.Name)
+ } else if ginkgoFlag.Usage != "" {
+ message += " " + ginkgoFlag.Usage
+ }
+
+ deprecationTracker.TrackDeprecation(Deprecation{
+ Message: message,
+ DocLink: ginkgoFlag.DeprecatedDocLink,
+ Version: ginkgoFlag.DeprecatedVersion,
+ })
+ }
+ }
+ })
+}
+
+func (f GinkgoFlagSet) Usage() string {
+ if f.IsZero() {
+ return ""
+ }
+ groupedFlags := map[GinkgoFlagSection]GinkgoFlags{}
+ ungroupedFlags := GinkgoFlags{}
+ managedFlags := map[string]bool{}
+ extraGoFlags := []*flag.Flag{}
+
+ for _, flag := range f.flags {
+ managedFlags[flag.Name] = true
+ managedFlags[flag.DeprecatedName] = true
+
+ if flag.Name == "" {
+ continue
+ }
+
+ section, ok := f.sections.Lookup(flag.SectionKey)
+ if ok {
+ groupedFlags[section] = append(groupedFlags[section], flag)
+ } else {
+ ungroupedFlags = append(ungroupedFlags, flag)
+ }
+ }
+
+ f.flagSet.VisitAll(func(flag *flag.Flag) {
+ if !managedFlags[flag.Name] {
+ extraGoFlags = append(extraGoFlags, flag)
+ }
+ })
+
+ out := ""
+ for _, section := range f.sections {
+ flags := groupedFlags[section]
+ if len(flags) == 0 {
+ continue
+ }
+ out += f.usageForSection(section)
+ if section.Succinct {
+ succinctFlags := []string{}
+ for _, flag := range flags {
+ if flag.Name != "" {
+ succinctFlags = append(succinctFlags, fmt.Sprintf("--%s", flag.Name))
+ }
+ }
+ out += formatter.Fiw(1, formatter.COLS, section.Style+strings.Join(succinctFlags, ", ")+"{{/}}\n")
+ } else {
+ for _, flag := range flags {
+ out += f.usageForFlag(flag, section.Style)
+ }
+ }
+ out += "\n"
+ }
+ if len(ungroupedFlags) > 0 {
+ for _, flag := range ungroupedFlags {
+ out += f.usageForFlag(flag, "")
+ }
+ out += "\n"
+ }
+ if len(extraGoFlags) > 0 {
+ out += f.usageForSection(f.extraGoFlagsSection)
+ for _, goFlag := range extraGoFlags {
+ out += f.usageForGoFlag(goFlag)
+ }
+ }
+
+ return out
+}
+
+func (f GinkgoFlagSet) substituteUsage() {
+ fmt.Fprintln(f.flagSet.Output(), f.Usage())
+}
+
+func valueAtKeyPath(root interface{}, keyPath string) (reflect.Value, bool) {
+ if len(keyPath) == 0 {
+ return reflect.Value{}, false
+ }
+
+ val := reflect.ValueOf(root)
+ components := strings.Split(keyPath, ".")
+ for _, component := range components {
+ val = reflect.Indirect(val)
+ switch val.Kind() {
+ case reflect.Map:
+ val = val.MapIndex(reflect.ValueOf(component))
+ if val.Kind() == reflect.Interface {
+ val = reflect.ValueOf(val.Interface())
+ }
+ case reflect.Struct:
+ val = val.FieldByName(component)
+ default:
+ return reflect.Value{}, false
+ }
+ if (val == reflect.Value{}) {
+ return reflect.Value{}, false
+ }
+ }
+
+ return val, true
+}
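+
+// For illustration, KeyPath resolution walks nested structs and maps by name
+// (the types below are hypothetical):
+//
+//	type inner struct{ Timeout time.Duration }
+//	type outer struct{ Inner inner }
+//	v, ok := valueAtKeyPath(&outer{inner{30 * time.Second}}, "Inner.Timeout")
+//	// ok == true and v.Interface().(time.Duration) == 30 * time.Second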
+
+func (f GinkgoFlagSet) usageForSection(section GinkgoFlagSection) string {
+ out := formatter.F(section.Style + "{{bold}}{{underline}}" + section.Heading + "{{/}}\n")
+ if section.Description != "" {
+ out += formatter.Fiw(0, formatter.COLS, section.Description+"\n")
+ }
+ return out
+}
+
+func (f GinkgoFlagSet) usageForFlag(flag GinkgoFlag, style string) string {
+ argument := flag.UsageArgument
+ defValue := flag.UsageDefaultValue
+ if argument == "" {
+ value, _ := valueAtKeyPath(f.bindings, flag.KeyPath)
+ switch value.Type() {
+ case reflect.TypeOf(string("")):
+ argument = "string"
+ case reflect.TypeOf(int64(0)), reflect.TypeOf(int(0)):
+ argument = "int"
+ case reflect.TypeOf(time.Duration(0)):
+ argument = "duration"
+ case reflect.TypeOf(float64(0)):
+ argument = "float"
+ case reflect.TypeOf([]string{}):
+ argument = "string"
+ }
+ }
+ if argument != "" {
+ argument = "[" + argument + "] "
+ }
+ if defValue != "" {
+ defValue = fmt.Sprintf("(default: %s)", defValue)
+ }
+ hyphens := "--"
+ if len(flag.Name) == 1 {
+ hyphens = "-"
+ }
+
+ out := formatter.Fi(1, style+"%s%s{{/}} %s{{gray}}%s{{/}}\n", hyphens, flag.Name, argument, defValue)
+ out += formatter.Fiw(2, formatter.COLS, "{{light-gray}}%s{{/}}\n", flag.Usage)
+ return out
+}
+
+func (f GinkgoFlagSet) usageForGoFlag(goFlag *flag.Flag) string {
+ //Taken directly from the flag package
+ out := fmt.Sprintf(" -%s", goFlag.Name)
+ name, usage := flag.UnquoteUsage(goFlag)
+ if len(name) > 0 {
+ out += " " + name
+ }
+ if len(out) <= 4 {
+ out += "\t"
+ } else {
+ out += "\n \t"
+ }
+ out += strings.ReplaceAll(usage, "\n", "\n \t")
+ out += "\n"
+ return out
+}
+
+type stringSliceVar struct {
+ slice reflect.Value
+}
+
+func (ssv stringSliceVar) String() string { return "" }
+func (ssv stringSliceVar) Set(s string) error {
+ ssv.slice.Set(reflect.AppendSlice(ssv.slice, reflect.ValueOf([]string{s})))
+ return nil
+}
+
+// GenerateFlagArgs generates, from a set of GinkgoFlags and bindings, flag arguments suitable to be passed to an application with that set of flags configured.
+func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error) {
+ result := []string{}
+ for _, flag := range flags {
+ name := flag.ExportAs
+ if name == "" {
+ name = flag.Name
+ }
+ if name == "" {
+ continue
+ }
+
+ value, ok := valueAtKeyPath(bindings, flag.KeyPath)
+ if !ok {
+ return []string{}, fmt.Errorf("could not load KeyPath: %s", flag.KeyPath)
+ }
+
+ iface := value.Interface()
+ switch value.Type() {
+ case reflect.TypeOf(string("")):
+ if iface.(string) != "" || flag.AlwaysExport {
+ result = append(result, fmt.Sprintf("--%s=%s", name, iface))
+ }
+ case reflect.TypeOf(int64(0)):
+ if iface.(int64) != 0 || flag.AlwaysExport {
+ result = append(result, fmt.Sprintf("--%s=%d", name, iface))
+ }
+ case reflect.TypeOf(float64(0)):
+ if iface.(float64) != 0 || flag.AlwaysExport {
+ result = append(result, fmt.Sprintf("--%s=%f", name, iface))
+ }
+ case reflect.TypeOf(int(0)):
+ if iface.(int) != 0 || flag.AlwaysExport {
+ result = append(result, fmt.Sprintf("--%s=%d", name, iface))
+ }
+ case reflect.TypeOf(bool(true)):
+ if iface.(bool) {
+ result = append(result, fmt.Sprintf("--%s", name))
+ }
+ case reflect.TypeOf(time.Duration(0)):
+ if iface.(time.Duration) != time.Duration(0) || flag.AlwaysExport {
+ result = append(result, fmt.Sprintf("--%s=%s", name, iface))
+ }
+
+ case reflect.TypeOf([]string{}):
+ strings := iface.([]string)
+ for _, s := range strings {
+ result = append(result, fmt.Sprintf("--%s=%s", name, s))
+ }
+ default:
+ return []string{}, fmt.Errorf("unsupported type %T", iface)
+ }
+ }
+
+ return result, nil
+}
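+
+// An illustrative round-trip (the flag name and map binding are hypothetical):
+//
+//	flags := GinkgoFlags{{Name: "procs", KeyPath: "P"}}
+//	args, _ := GenerateFlagArgs(flags, map[string]interface{}{"P": 4})
+//	// args == []string{"--procs=4"}; string, numeric, and duration zero values
+//	// are skipped unless AlwaysExport is set, and false bools are always skipped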
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go b/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go
new file mode 100644
index 000000000..7fdc8aa23
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go
@@ -0,0 +1,583 @@
+package types
+
+import (
+ "fmt"
+ "regexp"
+ "strings"
+)
+
+var DEBUG_LABEL_FILTER_PARSING = false
+
+type LabelFilter func([]string) bool
+
+func matchLabelAction(label string) LabelFilter {
+ expected := strings.ToLower(label)
+ return func(labels []string) bool {
+ for i := range labels {
+ if strings.ToLower(labels[i]) == expected {
+ return true
+ }
+ }
+ return false
+ }
+}
+
+func matchLabelRegexAction(regex *regexp.Regexp) LabelFilter {
+ return func(labels []string) bool {
+ for i := range labels {
+ if regex.MatchString(labels[i]) {
+ return true
+ }
+ }
+ return false
+ }
+}
+
+func notAction(filter LabelFilter) LabelFilter {
+ return func(labels []string) bool { return !filter(labels) }
+}
+
+func andAction(a, b LabelFilter) LabelFilter {
+ return func(labels []string) bool { return a(labels) && b(labels) }
+}
+
+func orAction(a, b LabelFilter) LabelFilter {
+ return func(labels []string) bool { return a(labels) || b(labels) }
+}
+
+func labelSetFor(key string, labels []string) map[string]bool {
+ key = strings.ToLower(strings.TrimSpace(key))
+ out := map[string]bool{}
+ for _, label := range labels {
+ components := strings.SplitN(label, ":", 2)
+ if len(components) < 2 {
+ continue
+ }
+ if key == strings.ToLower(strings.TrimSpace(components[0])) {
+ out[strings.ToLower(strings.TrimSpace(components[1]))] = true
+ }
+ }
+
+ return out
+}
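+
+// Labels of the form "key: value" feed the set operations below. A sketch of
+// the derived set (the inputs are illustrative):
+//
+//	labelSetFor("color", []string{"color: red", "Color: Blue", "slow"})
+//	// => map[string]bool{"red": true, "blue": true}
+//	// keys and values are lower-cased and trimmed; "slow" has no key, so it is skipped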
+
+func isEmptyLabelSetAction(key string) LabelFilter {
+ return func(labels []string) bool {
+ return len(labelSetFor(key, labels)) == 0
+ }
+}
+
+func containsAnyLabelSetAction(key string, expectedValues []string) LabelFilter {
+ return func(labels []string) bool {
+ set := labelSetFor(key, labels)
+ for _, value := range expectedValues {
+ if set[value] {
+ return true
+ }
+ }
+ return false
+ }
+}
+
+func containsAllLabelSetAction(key string, expectedValues []string) LabelFilter {
+ return func(labels []string) bool {
+ set := labelSetFor(key, labels)
+ for _, value := range expectedValues {
+ if !set[value] {
+ return false
+ }
+ }
+ return true
+ }
+}
+
+func consistsOfLabelSetAction(key string, expectedValues []string) LabelFilter {
+ return func(labels []string) bool {
+ set := labelSetFor(key, labels)
+ if len(set) != len(expectedValues) {
+ return false
+ }
+ for _, value := range expectedValues {
+ if !set[value] {
+ return false
+ }
+ }
+ return true
+ }
+}
+
+func isSubsetOfLabelSetAction(key string, expectedValues []string) LabelFilter {
+ expectedSet := map[string]bool{}
+ for _, value := range expectedValues {
+ expectedSet[value] = true
+ }
+ return func(labels []string) bool {
+ set := labelSetFor(key, labels)
+ for value := range set {
+ if !expectedSet[value] {
+ return false
+ }
+ }
+ return true
+ }
+}
+
+type lfToken uint
+
+const (
+ lfTokenInvalid lfToken = iota
+
+ lfTokenRoot
+ lfTokenOpenGroup
+ lfTokenCloseGroup
+ lfTokenNot
+ lfTokenAnd
+ lfTokenOr
+ lfTokenRegexp
+ lfTokenLabel
+ lfTokenSetKey
+ lfTokenSetOperation
+ lfTokenSetArgument
+ lfTokenEOF
+)
+
+func (l lfToken) Precedence() int {
+ switch l {
+ case lfTokenRoot, lfTokenOpenGroup:
+ return 0
+ case lfTokenOr:
+ return 1
+ case lfTokenAnd:
+ return 2
+ case lfTokenNot:
+ return 3
+ case lfTokenSetOperation:
+ return 4
+ }
+ return -1
+}
+
+func (l lfToken) String() string {
+ switch l {
+ case lfTokenRoot:
+ return "ROOT"
+ case lfTokenOpenGroup:
+ return "("
+ case lfTokenCloseGroup:
+ return ")"
+ case lfTokenNot:
+ return "!"
+ case lfTokenAnd:
+ return "&&"
+ case lfTokenOr:
+ return "||"
+ case lfTokenRegexp:
+ return "/regexp/"
+ case lfTokenLabel:
+ return "label"
+ case lfTokenSetKey:
+ return "set_key"
+ case lfTokenSetOperation:
+ return "set_operation"
+ case lfTokenSetArgument:
+ return "set_argument"
+ case lfTokenEOF:
+ return "EOF"
+ }
+ return "INVALID"
+}
+
+type treeNode struct {
+ token lfToken
+ location int
+ value string
+
+ parent *treeNode
+ leftNode *treeNode
+ rightNode *treeNode
+}
+
+func (tn *treeNode) setRightNode(node *treeNode) {
+ tn.rightNode = node
+ node.parent = tn
+}
+
+func (tn *treeNode) setLeftNode(node *treeNode) {
+ tn.leftNode = node
+ node.parent = tn
+}
+
+func (tn *treeNode) firstAncestorWithPrecedenceLEQ(precedence int) *treeNode {
+ if tn.token.Precedence() <= precedence {
+ return tn
+ }
+ return tn.parent.firstAncestorWithPrecedenceLEQ(precedence)
+}
+
+func (tn *treeNode) firstUnmatchedOpenNode() *treeNode {
+ if tn.token == lfTokenOpenGroup {
+ return tn
+ }
+ if tn.parent == nil {
+ return nil
+ }
+ return tn.parent.firstUnmatchedOpenNode()
+}
+
+func (tn *treeNode) constructLabelFilter(input string) (LabelFilter, error) {
+ switch tn.token {
+ case lfTokenOpenGroup:
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, "Mismatched '(' - could not find matching ')'.")
+ case lfTokenLabel:
+ return matchLabelAction(tn.value), nil
+ case lfTokenRegexp:
+ re, err := regexp.Compile(tn.value)
+ if err != nil {
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("RegExp compilation error: %s", err))
+ }
+ return matchLabelRegexAction(re), nil
+ case lfTokenSetOperation:
+ tokenSetOperation := strings.ToLower(tn.value)
+ if tokenSetOperation == "isempty" {
+ return isEmptyLabelSetAction(tn.leftNode.value), nil
+ }
+ if tn.rightNode == nil {
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("Set operation '%s' is missing an argument.", tn.value))
+ }
+
+ rawValues := strings.Split(tn.rightNode.value, ",")
+ values := make([]string, len(rawValues))
+ for i := range rawValues {
+ values[i] = strings.ToLower(strings.TrimSpace(rawValues[i]))
+ if strings.ContainsAny(values[i], "&|!,()/") {
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.rightNode.location, fmt.Sprintf("Invalid label value '%s' in set operation argument.", values[i]))
+ } else if values[i] == "" {
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.rightNode.location, "Empty label value in set operation argument.")
+ }
+ }
+ switch tokenSetOperation {
+ case "containsany":
+ return containsAnyLabelSetAction(tn.leftNode.value, values), nil
+ case "containsall":
+ return containsAllLabelSetAction(tn.leftNode.value, values), nil
+ case "consistsof":
+ return consistsOfLabelSetAction(tn.leftNode.value, values), nil
+ case "issubsetof":
+ return isSubsetOfLabelSetAction(tn.leftNode.value, values), nil
+ }
+ }
+
+ if tn.rightNode == nil {
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, -1, "Unexpected EOF.")
+ }
+ rightLF, err := tn.rightNode.constructLabelFilter(input)
+ if err != nil {
+ return nil, err
+ }
+
+ switch tn.token {
+ case lfTokenRoot, lfTokenCloseGroup:
+ return rightLF, nil
+ case lfTokenNot:
+ return notAction(rightLF), nil
+ }
+
+ if tn.leftNode == nil {
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("Malformed tree - '%s' is missing left operand.", tn.token))
+ }
+ leftLF, err := tn.leftNode.constructLabelFilter(input)
+ if err != nil {
+ return nil, err
+ }
+
+ switch tn.token {
+ case lfTokenAnd:
+ return andAction(leftLF, rightLF), nil
+ case lfTokenOr:
+ return orAction(leftLF, rightLF), nil
+ }
+
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("Invalid token '%s'.", tn.token))
+}
+
+func (tn *treeNode) tokenString() string {
+ out := fmt.Sprintf("<%s", tn.token)
+ if tn.value != "" {
+ out += " | " + tn.value
+ }
+ out += ">"
+ return out
+}
+
+func (tn *treeNode) toString(indent int) string {
+ out := tn.tokenString() + "\n"
+ if tn.leftNode != nil {
+ out += fmt.Sprintf("%s |_(L)_%s", strings.Repeat(" ", indent), tn.leftNode.toString(indent+1))
+ }
+ if tn.rightNode != nil {
+ out += fmt.Sprintf("%s |_(R)_%s", strings.Repeat(" ", indent), tn.rightNode.toString(indent+1))
+ }
+ return out
+}
+
+var validSetOperations = map[string]string{
+ "containsany": "containsAny",
+ "containsall": "containsAll",
+ "consistsof": "consistsOf",
+ "issubsetof": "isSubsetOf",
+ "isempty": "isEmpty",
+}
+
+func tokenize(input string) func() (*treeNode, error) {
+ lastToken := lfTokenInvalid
+ lastValue := ""
+ runes, i := []rune(input), 0
+
+ peekIs := func(r rune) bool {
+ if i+1 < len(runes) {
+ return runes[i+1] == r
+ }
+ return false
+ }
+
+ consumeUntil := func(cutset string) (string, int) {
+ j := i
+ for ; j < len(runes); j++ {
+ if strings.IndexRune(cutset, runes[j]) >= 0 {
+ break
+ }
+ }
+ return string(runes[i:j]), j - i
+ }
+
+ return func() (*treeNode, error) {
+ for i < len(runes) && runes[i] == ' ' {
+ i += 1
+ }
+
+ if i >= len(runes) {
+ return &treeNode{token: lfTokenEOF}, nil
+ }
+
+ node := &treeNode{location: i}
+ defer func() {
+ lastToken = node.token
+ lastValue = node.value
+ }()
+
+ if lastToken == lfTokenSetKey {
+ //we should get a valid set operation next
+ value, n := consumeUntil(" )")
+ if validSetOperations[strings.ToLower(value)] == "" {
+ return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, fmt.Sprintf("Invalid set operation '%s'.", value))
+ }
+ i += n
+ node.token, node.value = lfTokenSetOperation, value
+ return node, nil
+ }
+ if lastToken == lfTokenSetOperation {
+ //we should get an argument next, if we aren't isempty
+ var arg = ""
+ origI := i
+ if runes[i] == '{' {
+ i += 1
+ value, n := consumeUntil("}")
+ if i+n >= len(runes) {
+ return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i-1, "Missing closing '}' in set operation argument?")
+ }
+ i += n + 1
+ arg = value
+ } else {
+ value, n := consumeUntil("&|!,()/")
+ i += n
+ arg = strings.TrimSpace(value)
+ }
+ if strings.ToLower(lastValue) == "isempty" && arg != "" {
+ return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, origI, fmt.Sprintf("isEmpty does not take arguments, was passed '%s'.", arg))
+ }
+ if arg == "" && strings.ToLower(lastValue) != "isempty" {
+ if i < len(runes) && runes[i] == '/' {
+ return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, origI, "Set operations do not support regular expressions.")
+ } else {
+ return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, origI, fmt.Sprintf("Set operation '%s' requires an argument.", lastValue))
+ }
+ }
+ // note that we send an empty SetArgument token if the operation is isempty
+ node.token, node.value = lfTokenSetArgument, arg
+ return node, nil
+ }
+
+ switch runes[i] {
+ case '&':
+ if !peekIs('&') {
+ return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Invalid token '&'. Did you mean '&&'?")
+ }
+ i += 2
+ node.token = lfTokenAnd
+ case '|':
+ if !peekIs('|') {
+ return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Invalid token '|'. Did you mean '||'?")
+ }
+ i += 2
+ node.token = lfTokenOr
+ case '!':
+ i += 1
+ node.token = lfTokenNot
+ case ',':
+ i += 1
+ node.token = lfTokenOr
+ case '(':
+ i += 1
+ node.token = lfTokenOpenGroup
+ case ')':
+ i += 1
+ node.token = lfTokenCloseGroup
+ case '/':
+ i += 1
+ value, n := consumeUntil("/")
+ i += n + 1
+ node.token, node.value = lfTokenRegexp, value
+ default:
+ value, n := consumeUntil("&|!,()/:")
+ i += n
+ value = strings.TrimSpace(value)
+
+ //are we the beginning of a set operation?
+ if i < len(runes) && runes[i] == ':' {
+ if peekIs(' ') {
+ if value == "" {
+ return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Missing set key.")
+ }
+ i += 1
+ //we are the beginning of a set operation
+ node.token, node.value = lfTokenSetKey, value
+ return node, nil
+ }
+ additionalValue, n := consumeUntil("&|!,()/")
+ additionalValue = strings.TrimSpace(additionalValue)
+ if additionalValue == ":" {
+ return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Missing set operation.")
+ }
+ i += n
+ value += additionalValue
+ }
+
+ valueToCheckForSetOperation := strings.ToLower(value)
+ for setOperation := range validSetOperations {
+ idx := strings.Index(valueToCheckForSetOperation, " "+setOperation)
+ if idx > 0 {
+ return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i-n+idx+1, fmt.Sprintf("Looks like you are using the set operator '%s' but did not provide a set key. Did you forget the ':'?", validSetOperations[setOperation]))
+ }
+ }
+
+ node.token, node.value = lfTokenLabel, strings.TrimSpace(value)
+ }
+ return node, nil
+ }
+}
+
+func MustParseLabelFilter(input string) LabelFilter {
+ filter, err := ParseLabelFilter(input)
+ if err != nil {
+ panic(err)
+ }
+ return filter
+}
+
+func ParseLabelFilter(input string) (LabelFilter, error) {
+ if DEBUG_LABEL_FILTER_PARSING {
+ fmt.Println("\n==============")
+ fmt.Println("Input: ", input)
+ fmt.Print("Tokens: ")
+ }
+ if input == "" {
+ return func(_ []string) bool { return true }, nil
+ }
+ nextToken := tokenize(input)
+
+ root := &treeNode{token: lfTokenRoot}
+ current := root
+LOOP:
+ for {
+ node, err := nextToken()
+ if err != nil {
+ return nil, err
+ }
+
+ if DEBUG_LABEL_FILTER_PARSING {
+ fmt.Print(node.tokenString() + " ")
+ }
+
+ switch node.token {
+ case lfTokenEOF:
+ break LOOP
+ case lfTokenLabel, lfTokenRegexp, lfTokenSetKey:
+ if current.rightNode != nil {
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, "Found two adjacent labels. You need an operator between them.")
+ }
+ current.setRightNode(node)
+ case lfTokenNot, lfTokenOpenGroup:
+ if current.rightNode != nil {
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Invalid token '%s'.", node.token))
+ }
+ current.setRightNode(node)
+ current = node
+ case lfTokenAnd, lfTokenOr:
+ if current.rightNode == nil {
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Operator '%s' missing left hand operand.", node.token))
+ }
+ nodeToStealFrom := current.firstAncestorWithPrecedenceLEQ(node.token.Precedence())
+ node.setLeftNode(nodeToStealFrom.rightNode)
+ nodeToStealFrom.setRightNode(node)
+ current = node
+ case lfTokenSetOperation:
+ if current.rightNode == nil {
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Set operation '%s' missing left hand operand.", node.value))
+ }
+ node.setLeftNode(current.rightNode)
+ current.setRightNode(node)
+ current = node
+ case lfTokenSetArgument:
+ if current.rightNode != nil {
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Unexpected set argument '%s'.", node.token))
+ }
+ current.setRightNode(node)
+ case lfTokenCloseGroup:
+ firstUnmatchedOpenNode := current.firstUnmatchedOpenNode()
+ if firstUnmatchedOpenNode == nil {
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, "Mismatched ')' - could not find matching '('.")
+ }
+ if firstUnmatchedOpenNode == current && current.rightNode == nil {
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, "Found empty '()' group.")
+ }
+ firstUnmatchedOpenNode.token = lfTokenCloseGroup //signify the group is now closed
+ current = firstUnmatchedOpenNode.parent
+ default:
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Unknown token '%s'.", node.token))
+ }
+ }
+ if DEBUG_LABEL_FILTER_PARSING {
+ fmt.Printf("\n Tree:\n%s", root.toString(0))
+ }
+ return root.constructLabelFilter(input)
+}
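+
+// A sketch of expressions this parser accepts (the labels are illustrative):
+//
+//	f := MustParseLabelFilter("(network || storage) && !slow")
+//	f([]string{"network"})         // true
+//	f([]string{"network", "slow"}) // false
+//
+//	g := MustParseLabelFilter("os: consistsOf {linux, windows}")
+//	g([]string{"os: linux", "os: windows"}) // true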
+
+func ValidateAndCleanupLabel(label string, cl CodeLocation) (string, error) {
+ out := strings.TrimSpace(label)
+ if out == "" {
+ return "", GinkgoErrors.InvalidEmptyLabel(cl)
+ }
+ if strings.ContainsAny(out, "&|!,()/") {
+ return "", GinkgoErrors.InvalidLabel(label, cl)
+ }
+ if out[0] == ':' {
+ return "", GinkgoErrors.InvalidLabel(label, cl)
+ }
+ if strings.Contains(out, ":") {
+ components := strings.SplitN(out, ":", 2)
+ if len(components) < 2 || components[1] == "" {
+ return "", GinkgoErrors.InvalidLabel(label, cl)
+ }
+ }
+ return out, nil
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go b/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go
new file mode 100644
index 000000000..7b1524b52
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go
@@ -0,0 +1,190 @@
+package types
+
+import (
+ "encoding/json"
+ "fmt"
+ "time"
+)
+
+// ReportEntryValue wraps a report entry's value ensuring it can be encoded and decoded safely into reports
+// and across the network connection when running in parallel
+type ReportEntryValue struct {
+ raw interface{} //unexported to prevent gob from freaking out about unregistered structs
+ AsJSON string
+ Representation string
+}
+
+func WrapEntryValue(value interface{}) ReportEntryValue {
+ return ReportEntryValue{
+ raw: value,
+ }
+}
+
+func (rev ReportEntryValue) GetRawValue() interface{} {
+ return rev.raw
+}
+
+func (rev ReportEntryValue) String() string {
+ if rev.raw == nil {
+ return ""
+ }
+ if colorableStringer, ok := rev.raw.(ColorableStringer); ok {
+ return colorableStringer.ColorableString()
+ }
+
+ if stringer, ok := rev.raw.(fmt.Stringer); ok {
+ return stringer.String()
+ }
+ if rev.Representation != "" {
+ return rev.Representation
+ }
+ return fmt.Sprintf("%+v", rev.raw)
+}
+
+func (rev ReportEntryValue) MarshalJSON() ([]byte, error) {
+ //All this to capture the representation at encoding-time, not creation time
+ //This way users can Report on pointers and get their final values at reporting-time
+ out := struct {
+ AsJSON string
+ Representation string
+ }{
+ Representation: rev.String(),
+ }
+ asJSON, err := json.Marshal(rev.raw)
+ if err != nil {
+ return nil, err
+ }
+ out.AsJSON = string(asJSON)
+
+ return json.Marshal(out)
+}
+
+func (rev *ReportEntryValue) UnmarshalJSON(data []byte) error {
+ in := struct {
+ AsJSON string
+ Representation string
+ }{}
+ err := json.Unmarshal(data, &in)
+ if err != nil {
+ return err
+ }
+ rev.AsJSON = in.AsJSON
+ rev.Representation = in.Representation
+ return json.Unmarshal([]byte(in.AsJSON), &(rev.raw))
+}
+
+func (rev ReportEntryValue) GobEncode() ([]byte, error) {
+ return rev.MarshalJSON()
+}
+
+func (rev *ReportEntryValue) GobDecode(data []byte) error {
+ return rev.UnmarshalJSON(data)
+}
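+
+// For illustration, encoding captures both the JSON form of the raw value and
+// its string representation at encode time (the wrapped struct is hypothetical):
+//
+//	rev := WrapEntryValue(struct{ N int }{N: 3})
+//	data, _ := json.Marshal(rev)
+//	// data == `{"AsJSON":"{\"N\":3}","Representation":"{N:3}"}`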
+
+// ReportEntry captures information attached to `SpecReport` via `AddReportEntry`
+type ReportEntry struct {
+ // Visibility captures the visibility policy for this ReportEntry
+ Visibility ReportEntryVisibility
+ // Location captures the location of the AddReportEntry call
+ Location CodeLocation
+
+ Time time.Time //need this for backwards compatibility
+ TimelineLocation TimelineLocation
+
+ // Name captures the name of this report
+ Name string
+ // Value captures the (optional) object passed into AddReportEntry - this can be
+ // anything the user wants. The value passed to AddReportEntry is wrapped in a ReportEntryValue to make
+ // encoding/decoding the value easier. To access the raw value call entry.GetRawValue()
+ Value ReportEntryValue
+}
+
+// ColorableStringer is an interface that ReportEntry values can satisfy. If they do then ColorableString() is used to generate their representation.
+type ColorableStringer interface {
+ ColorableString() string
+}
+
+// StringRepresentation() returns the string representation of the value associated with the ReportEntry --
+// if value is nil, empty string is returned
+// if value is a `ColorableStringer` then `Value.ColorableString()` is returned
+// if value is a `fmt.Stringer` then `Value.String()` is returned
+// otherwise the value is formatted with "%+v"
+func (entry ReportEntry) StringRepresentation() string {
+ return entry.Value.String()
+}
+
+// GetRawValue returns the Value object that was passed to AddReportEntry
+// If called in-process this will be the same object that was passed into AddReportEntry.
+// If used from a rehydrated JSON file _or_ in a ReportAfterSuite when running in parallel this will be
+// a JSON-decoded interface{}. If you want to reconstitute your original object you can decode the entry.Value.AsJSON
+// field yourself.
+func (entry ReportEntry) GetRawValue() interface{} {
+ return entry.Value.GetRawValue()
+}
+
+func (entry ReportEntry) GetTimelineLocation() TimelineLocation {
+ return entry.TimelineLocation
+}
+
+type ReportEntries []ReportEntry
+
+func (re ReportEntries) HasVisibility(visibilities ...ReportEntryVisibility) bool {
+ for _, entry := range re {
+ if entry.Visibility.Is(visibilities...) {
+ return true
+ }
+ }
+ return false
+}
+
+func (re ReportEntries) WithVisibility(visibilities ...ReportEntryVisibility) ReportEntries {
+ out := ReportEntries{}
+
+ for _, entry := range re {
+ if entry.Visibility.Is(visibilities...) {
+ out = append(out, entry)
+ }
+ }
+
+ return out
+}
+
+// ReportEntryVisibility governs the visibility of ReportEntries in Ginkgo's console reporter
+type ReportEntryVisibility uint
+
+const (
+ // Always print out this ReportEntry
+ ReportEntryVisibilityAlways ReportEntryVisibility = iota
+ // Only print out this ReportEntry if the spec fails or if the test is run with -v
+ ReportEntryVisibilityFailureOrVerbose
+ // Never print out this ReportEntry (note that ReportEntries are always encoded in machine-readable reports (e.g. JSON, JUnit, etc.))
+ ReportEntryVisibilityNever
+)
+
+var revEnumSupport = NewEnumSupport(map[uint]string{
+ uint(ReportEntryVisibilityAlways): "always",
+ uint(ReportEntryVisibilityFailureOrVerbose): "failure-or-verbose",
+ uint(ReportEntryVisibilityNever): "never",
+})
+
+func (rev ReportEntryVisibility) String() string {
+ return revEnumSupport.String(uint(rev))
+}
+func (rev *ReportEntryVisibility) UnmarshalJSON(b []byte) error {
+ out, err := revEnumSupport.UnmarshJSON(b)
+ *rev = ReportEntryVisibility(out)
+ return err
+}
+func (rev ReportEntryVisibility) MarshalJSON() ([]byte, error) {
+ return revEnumSupport.MarshJSON(uint(rev))
+}
+
+func (v ReportEntryVisibility) Is(visibilities ...ReportEntryVisibility) bool {
+ for _, visibility := range visibilities {
+ if v == visibility {
+ return true
+ }
+ }
+
+ return false
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/types.go b/vendor/github.com/onsi/ginkgo/v2/types/types.go
new file mode 100644
index 000000000..ddcbec1ba
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/types/types.go
@@ -0,0 +1,922 @@
+package types
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+ "sort"
+ "strings"
+ "time"
+)
+
+const GINKGO_FOCUS_EXIT_CODE = 197
+
+var GINKGO_TIME_FORMAT = "01/02/06 15:04:05.999"
+
+func init() {
+ if os.Getenv("GINKGO_TIME_FORMAT") != "" {
+ GINKGO_TIME_FORMAT = os.Getenv("GINKGO_TIME_FORMAT")
+ }
+}
+
+// Report captures information about a Ginkgo test run
+type Report struct {
+ //SuitePath captures the absolute path to the test suite
+ SuitePath string
+
+ //SuiteDescription captures the description string passed to the DSL's RunSpecs() function
+ SuiteDescription string
+
+ //SuiteLabels captures any labels attached to the suite by the DSL's RunSpecs() function
+ SuiteLabels []string
+
+ //SuiteSucceeded captures the success or failure status of the test run
+ //If true, the test run is considered successful.
+ //If false, the test run is considered unsuccessful
+ SuiteSucceeded bool
+
+ //SuiteHasProgrammaticFocus captures whether the test suite has a test or set of tests that are programmatically focused
+ //(i.e. an `FIt` or an `FDescribe`)
+ SuiteHasProgrammaticFocus bool
+
+ //SpecialSuiteFailureReasons may contain special failure reasons.
+ //A test suite might be considered "failed" even if none of the individual specs
+ //have a failure state. For example, if the user has configured --fail-on-pending, the test suite
+ //will have failed if there are pending tests even though all non-pending tests may have passed. In such
+ //cases, Ginkgo populates SpecialSuiteFailureReasons with a clear message indicating the reason for the failure.
+ //SpecialSuiteFailureReasons is also populated if the test suite is interrupted by the user.
+ //Since multiple special failure reasons can occur, this field is a slice.
+ SpecialSuiteFailureReasons []string
+
+ //PreRunStats contains a set of stats captured before the test run begins. This is primarily used
+ //by Ginkgo's reporter to tell the user how many specs are in the current suite (PreRunStats.TotalSpecs)
+ //and how many it intends to run (PreRunStats.SpecsThatWillRun) after applying any relevant focus or skip filters.
+ PreRunStats PreRunStats
+
+ //StartTime and EndTime capture the start and end time of the test run
+ StartTime time.Time
+ EndTime time.Time
+
+ //RunTime captures the duration of the test run
+ RunTime time.Duration
+
+ //SuiteConfig captures the Ginkgo configuration governing this test run
+ //SuiteConfig includes information necessary for reproducing an identical test run,
+ //such as the random seed and any filters applied during the test run
+ SuiteConfig SuiteConfig
+
+ //SpecReports is a list of all SpecReports generated by this test run
+ //It is empty when the SuiteReport is provided to ReportBeforeSuite
+ SpecReports SpecReports
+}
+
+// PreRunStats contains a set of stats captured before the test run begins. This is primarily used
+// by Ginkgo's reporter to tell the user how many specs are in the current suite (PreRunStats.TotalSpecs)
+// and how many it intends to run (PreRunStats.SpecsThatWillRun) after applying any relevant focus or skip filters.
+type PreRunStats struct {
+ TotalSpecs int
+ SpecsThatWillRun int
+}
+
+// Add is used by Ginkgo's parallel aggregation mechanisms to combine test run reports from individual parallel processes
+// to form a complete final report.
+func (report Report) Add(other Report) Report {
+ report.SuiteSucceeded = report.SuiteSucceeded && other.SuiteSucceeded
+
+ if other.StartTime.Before(report.StartTime) {
+ report.StartTime = other.StartTime
+ }
+
+ if other.EndTime.After(report.EndTime) {
+ report.EndTime = other.EndTime
+ }
+
+ specialSuiteFailureReasons := []string{}
+ reasonsLookup := map[string]bool{}
+ for _, reasons := range [][]string{report.SpecialSuiteFailureReasons, other.SpecialSuiteFailureReasons} {
+ for _, reason := range reasons {
+ if !reasonsLookup[reason] {
+ reasonsLookup[reason] = true
+ specialSuiteFailureReasons = append(specialSuiteFailureReasons, reason)
+ }
+ }
+ }
+ report.SpecialSuiteFailureReasons = specialSuiteFailureReasons
+ report.RunTime = report.EndTime.Sub(report.StartTime)
+
+ reports := make(SpecReports, len(report.SpecReports)+len(other.SpecReports))
+ copy(reports, report.SpecReports)
+ offset := len(report.SpecReports)
+ for i := range other.SpecReports {
+ reports[i+offset] = other.SpecReports[i]
+ }
+
+ report.SpecReports = reports
+ return report
+}
+
+// SpecReport captures information about a Ginkgo spec.
+type SpecReport struct {
+ // ContainerHierarchyTexts is a slice containing the text strings of
+ // all Describe/Context/When containers in this spec's hierarchy.
+ ContainerHierarchyTexts []string
+
+ // ContainerHierarchyLocations is a slice containing the CodeLocations of
+ // all Describe/Context/When containers in this spec's hierarchy.
+ ContainerHierarchyLocations []CodeLocation
+
+ // ContainerHierarchyLabels is a slice containing the labels of
+ // all Describe/Context/When containers in this spec's hierarchy
+ ContainerHierarchyLabels [][]string
+
+ // LeafNodeType, LeafNodeLocation, LeafNodeLabels and LeafNodeText capture the NodeType, CodeLocation, labels, and text
+ // of the Ginkgo node being tested (typically a NodeTypeIt node, though this can also be
+ // one of the NodeTypesForSuiteLevelNodes node types)
+ LeafNodeType NodeType
+ LeafNodeLocation CodeLocation
+ LeafNodeLabels []string
+ LeafNodeText string
+
+ // State captures whether the spec has passed, failed, etc.
+ State SpecState
+
+ // IsSerial captures whether the spec has the Serial decorator
+ IsSerial bool
+
+ // IsInOrderedContainer captures whether the spec appears in an Ordered container
+ IsInOrderedContainer bool
+
+ // StartTime and EndTime capture the start and end time of the spec
+ StartTime time.Time
+ EndTime time.Time
+
+ // RunTime captures the duration of the spec
+ RunTime time.Duration
+
+ // ParallelProcess captures the parallel process that this spec ran on
+ ParallelProcess int
+
+ // RunningInParallel captures whether this spec is part of a suite that ran in parallel
+ RunningInParallel bool
+
+ //Failure is populated if a spec has failed, panicked, been interrupted, or skipped by the user (e.g. calling Skip())
+ //It includes detailed information about the Failure
+ Failure Failure
+
+ // NumAttempts captures the number of times this Spec was run.
+ // Flaky specs can be retried with ginkgo --flake-attempts=N or the use of the FlakeAttempts decorator.
+ // Repeated specs can be retried with the use of the MustPassRepeatedly decorator
+ NumAttempts int
+
+ // MaxFlakeAttempts captures the maximum number of attempts allowed by ginkgo --flake-attempts=N or the FlakeAttempts decorator.
+ MaxFlakeAttempts int
+
+ // MaxMustPassRepeatedly captures the value of the MustPassRepeatedly decorator, if set
+ MaxMustPassRepeatedly int
+
+ // CapturedGinkgoWriterOutput contains text printed to the GinkgoWriter
+ CapturedGinkgoWriterOutput string
+
+ // CapturedStdOutErr contains text printed to stdout/stderr (when running in parallel)
+ // This is always empty when running in series or calling CurrentSpecReport()
+ // It is used internally by Ginkgo's reporter
+ CapturedStdOutErr string
+
+ // ReportEntries contains any reports added via `AddReportEntry`
+ ReportEntries ReportEntries
+
+ // ProgressReports contains any progress reports generated during this spec. These can either be manually triggered, or automatically generated by Ginkgo via the PollProgressAfter() decorator
+ ProgressReports []ProgressReport
+
+ // AdditionalFailures contains any failures that occurred after the initial spec failure. These typically occur in cleanup nodes after the initial failure and are only emitted when running in verbose mode.
+ AdditionalFailures []AdditionalFailure
+
+ // SpecEvents capture additional events that occur during the spec run
+ SpecEvents SpecEvents
+}
+
+func (report SpecReport) MarshalJSON() ([]byte, error) {
+ //All this to avoid emitting an empty Failure struct in the JSON
+ out := struct {
+ ContainerHierarchyTexts []string
+ ContainerHierarchyLocations []CodeLocation
+ ContainerHierarchyLabels [][]string
+ LeafNodeType NodeType
+ LeafNodeLocation CodeLocation
+ LeafNodeLabels []string
+ LeafNodeText string
+ State SpecState
+ StartTime time.Time
+ EndTime time.Time
+ RunTime time.Duration
+ ParallelProcess int
+ Failure *Failure `json:",omitempty"`
+ NumAttempts int
+ MaxFlakeAttempts int
+ MaxMustPassRepeatedly int
+ CapturedGinkgoWriterOutput string `json:",omitempty"`
+ CapturedStdOutErr string `json:",omitempty"`
+ ReportEntries ReportEntries `json:",omitempty"`
+ ProgressReports []ProgressReport `json:",omitempty"`
+ AdditionalFailures []AdditionalFailure `json:",omitempty"`
+ SpecEvents SpecEvents `json:",omitempty"`
+ }{
+ ContainerHierarchyTexts: report.ContainerHierarchyTexts,
+ ContainerHierarchyLocations: report.ContainerHierarchyLocations,
+ ContainerHierarchyLabels: report.ContainerHierarchyLabels,
+ LeafNodeType: report.LeafNodeType,
+ LeafNodeLocation: report.LeafNodeLocation,
+ LeafNodeLabels: report.LeafNodeLabels,
+ LeafNodeText: report.LeafNodeText,
+ State: report.State,
+ StartTime: report.StartTime,
+ EndTime: report.EndTime,
+ RunTime: report.RunTime,
+ ParallelProcess: report.ParallelProcess,
+ Failure: nil,
+ ReportEntries: nil,
+ NumAttempts: report.NumAttempts,
+ MaxFlakeAttempts: report.MaxFlakeAttempts,
+ MaxMustPassRepeatedly: report.MaxMustPassRepeatedly,
+ CapturedGinkgoWriterOutput: report.CapturedGinkgoWriterOutput,
+ CapturedStdOutErr: report.CapturedStdOutErr,
+ }
+
+ if !report.Failure.IsZero() {
+ out.Failure = &(report.Failure)
+ }
+ if len(report.ReportEntries) > 0 {
+ out.ReportEntries = report.ReportEntries
+ }
+ if len(report.ProgressReports) > 0 {
+ out.ProgressReports = report.ProgressReports
+ }
+ if len(report.AdditionalFailures) > 0 {
+ out.AdditionalFailures = report.AdditionalFailures
+ }
+ if len(report.SpecEvents) > 0 {
+ out.SpecEvents = report.SpecEvents
+ }
+
+ return json.Marshal(out)
+}
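+
+// A minimal sketch of the effect of the custom marshaller above: because the
+// anonymous struct carries Failure as a *Failure tagged `json:",omitempty"`,
+// a report whose Failure is zero marshals with no "Failure" key at all:
+//
+//	data, _ := json.Marshal(SpecReport{State: SpecStatePassed})
+//	// data contains "State":"passed" but no "Failure" field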
+
+// CombinedOutput returns a single string representation of both CapturedStdOutErr and CapturedGinkgoWriterOutput
+// Note that both are empty when using CurrentSpecReport(), so CurrentSpecReport().CombinedOutput() will always be empty.
+// CombinedOutput() is used internally by Ginkgo's reporter.
+func (report SpecReport) CombinedOutput() string {
+ if report.CapturedStdOutErr == "" {
+ return report.CapturedGinkgoWriterOutput
+ }
+ if report.CapturedGinkgoWriterOutput == "" {
+ return report.CapturedStdOutErr
+ }
+ return report.CapturedStdOutErr + "\n" + report.CapturedGinkgoWriterOutput
+}
+
+// Failed returns true if report.State is one of the SpecStateFailureStates
+// (SpecStateFailed, SpecStateTimedout, SpecStateAborted, SpecStatePanicked, SpecStateInterrupted)
+func (report SpecReport) Failed() bool {
+ return report.State.Is(SpecStateFailureStates)
+}
+
+// FullText returns a concatenation of all the report.ContainerHierarchyTexts and report.LeafNodeText
+func (report SpecReport) FullText() string {
+ texts := []string{}
+ texts = append(texts, report.ContainerHierarchyTexts...)
+ if report.LeafNodeText != "" {
+ texts = append(texts, report.LeafNodeText)
+ }
+ return strings.Join(texts, " ")
+}
+
+// Labels returns a deduped set of all the spec's Labels.
+func (report SpecReport) Labels() []string {
+ out := []string{}
+ seen := map[string]bool{}
+ for _, labels := range report.ContainerHierarchyLabels {
+ for _, label := range labels {
+ if !seen[label] {
+ seen[label] = true
+ out = append(out, label)
+ }
+ }
+ }
+ for _, label := range report.LeafNodeLabels {
+ if !seen[label] {
+ seen[label] = true
+ out = append(out, label)
+ }
+ }
+
+ return out
+}
+
+// MatchesLabelFilter returns true if the spec satisfies the passed in label filter query
+func (report SpecReport) MatchesLabelFilter(query string) (bool, error) {
+ filter, err := ParseLabelFilter(query)
+ if err != nil {
+ return false, err
+ }
+ return filter(report.Labels()), nil
+}
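+
+// For example, a sketch using Ginkgo's label-filter query syntax (the labels
+// here are hypothetical):
+//
+//	ok, err := report.MatchesLabelFilter("network && !slow")
+//	// ok is true when the spec carries the "network" label but not "slow"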
+
+// FileName() returns the name of the file containing the spec
+func (report SpecReport) FileName() string {
+ return report.LeafNodeLocation.FileName
+}
+
+// LineNumber() returns the line number of the leaf node
+func (report SpecReport) LineNumber() int {
+ return report.LeafNodeLocation.LineNumber
+}
+
+// FailureMessage() returns the failure message (or empty string if the test hasn't failed)
+func (report SpecReport) FailureMessage() string {
+ return report.Failure.Message
+}
+
+// FailureLocation() returns the location of the failure (or an empty CodeLocation if the test hasn't failed)
+func (report SpecReport) FailureLocation() CodeLocation {
+ return report.Failure.Location
+}
+
+// Timeline() returns a timeline view of the report
+func (report SpecReport) Timeline() Timeline {
+ timeline := Timeline{}
+ if !report.Failure.IsZero() {
+ timeline = append(timeline, report.Failure)
+ if report.Failure.AdditionalFailure != nil {
+ timeline = append(timeline, *(report.Failure.AdditionalFailure))
+ }
+ }
+ for _, additionalFailure := range report.AdditionalFailures {
+ timeline = append(timeline, additionalFailure)
+ }
+ for _, reportEntry := range report.ReportEntries {
+ timeline = append(timeline, reportEntry)
+ }
+ for _, progressReport := range report.ProgressReports {
+ timeline = append(timeline, progressReport)
+ }
+ for _, specEvent := range report.SpecEvents {
+ timeline = append(timeline, specEvent)
+ }
+ sort.Sort(timeline)
+ return timeline
+}
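+
+// Sketch of consuming the timeline (report is assumed to be a SpecReport):
+// events are interleaved in occurrence order, by TimelineLocation.Order, and
+// callers type-switch on the concrete TimelineEvent types:
+//
+//	for _, event := range report.Timeline() {
+//		switch e := event.(type) {
+//		case Failure:
+//			fmt.Println("failure at", e.Location.String())
+//		case SpecEvent:
+//			fmt.Println("event:", e.SpecEventType.String())
+//		}
+//	}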
+
+type SpecReports []SpecReport
+
+// WithLeafNodeType returns the subset of SpecReports with LeafNodeType matching one of the requested NodeTypes
+func (reports SpecReports) WithLeafNodeType(nodeTypes NodeType) SpecReports {
+ count := 0
+ for i := range reports {
+ if reports[i].LeafNodeType.Is(nodeTypes) {
+ count++
+ }
+ }
+
+ out := make(SpecReports, count)
+ j := 0
+ for i := range reports {
+ if reports[i].LeafNodeType.Is(nodeTypes) {
+ out[j] = reports[i]
+ j++
+ }
+ }
+ return out
+}
+
+// WithState returns the subset of SpecReports with State matching one of the requested SpecStates
+func (reports SpecReports) WithState(states SpecState) SpecReports {
+ count := 0
+ for i := range reports {
+ if reports[i].State.Is(states) {
+ count++
+ }
+ }
+
+ out, j := make(SpecReports, count), 0
+ for i := range reports {
+ if reports[i].State.Is(states) {
+ out[j] = reports[i]
+ j++
+ }
+ }
+ return out
+}
+
+// CountWithState returns the number of SpecReports with State matching one of the requested SpecStates
+func (reports SpecReports) CountWithState(states SpecState) int {
+ n := 0
+ for i := range reports {
+ if reports[i].State.Is(states) {
+ n += 1
+ }
+ }
+ return n
+}
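+
+// For example, a sketch summarizing a finished run (reports is assumed to be a
+// populated SpecReports slice):
+//
+//	failed := reports.CountWithState(SpecStateFailureStates)
+//	skipped := reports.CountWithState(SpecStateSkipped)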
+
+// CountOfFlakedSpecs returns the number of specs that flaked: specs that ultimately passed, but only after more than one attempt.
+func (reports SpecReports) CountOfFlakedSpecs() int {
+ n := 0
+ for i := range reports {
+ if reports[i].MaxFlakeAttempts > 1 && reports[i].State.Is(SpecStatePassed) && reports[i].NumAttempts > 1 {
+ n += 1
+ }
+ }
+ return n
+}
+
+// CountOfRepeatedSpecs returns the number of MustPassRepeatedly specs that ultimately failed after multiple attempts.
+func (reports SpecReports) CountOfRepeatedSpecs() int {
+ n := 0
+ for i := range reports {
+ if reports[i].MaxMustPassRepeatedly > 1 && reports[i].State.Is(SpecStateFailureStates) && reports[i].NumAttempts > 1 {
+ n += 1
+ }
+ }
+ return n
+}
+
+// TimelineLocation captures the location of an event in the spec's timeline
+type TimelineLocation struct {
+	// Offset is the offset (in bytes) of the event relative to the GinkgoWriter stream
+ Offset int `json:",omitempty"`
+
+	// Order is the order of the event with respect to other events. The absolute value of Order
+	// is irrelevant. All that matters is that an event with a lower Order occurs before an event with a higher Order.
+ Order int `json:",omitempty"`
+
+ Time time.Time
+}
+
+// TimelineEvent represents an event on the timeline.
+// Consumers of Timeline will need to check the concrete type of each entry to determine how to handle it.
+type TimelineEvent interface {
+ GetTimelineLocation() TimelineLocation
+}
+
+type Timeline []TimelineEvent
+
+func (t Timeline) Len() int { return len(t) }
+func (t Timeline) Less(i, j int) bool {
+ return t[i].GetTimelineLocation().Order < t[j].GetTimelineLocation().Order
+}
+func (t Timeline) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
+func (t Timeline) WithoutHiddenReportEntries() Timeline {
+ out := Timeline{}
+ for _, event := range t {
+ if reportEntry, isReportEntry := event.(ReportEntry); isReportEntry && reportEntry.Visibility == ReportEntryVisibilityNever {
+ continue
+ }
+ out = append(out, event)
+ }
+ return out
+}
+
+func (t Timeline) WithoutVeryVerboseSpecEvents() Timeline {
+ out := Timeline{}
+ for _, event := range t {
+ if specEvent, isSpecEvent := event.(SpecEvent); isSpecEvent && specEvent.IsOnlyVisibleAtVeryVerbose() {
+ continue
+ }
+ out = append(out, event)
+ }
+ return out
+}
+
+// Failure captures failure information for an individual test
+type Failure struct {
+ // Message - the failure message passed into Fail(...). When using a matcher library
+ // like Gomega, this will contain the failure message generated by Gomega.
+ //
+ // Message is also populated if the user has called Skip(...).
+ Message string
+
+ // Location - the CodeLocation where the failure occurred
+ // This CodeLocation will include a fully-populated StackTrace
+ Location CodeLocation
+
+ TimelineLocation TimelineLocation
+
+ // ForwardedPanic - if the failure represents a captured panic (i.e. Summary.State == SpecStatePanicked)
+ // then ForwardedPanic will be populated with a string representation of the captured panic.
+ ForwardedPanic string `json:",omitempty"`
+
+ // FailureNodeContext - one of three contexts describing the node in which the failure occurred:
+ // FailureNodeIsLeafNode means the failure occurred in the leaf node of the associated SpecReport. None of the other FailureNode fields will be populated
+ // FailureNodeAtTopLevel means the failure occurred in a non-leaf node that is defined at the top-level of the spec (i.e. not in a container). FailureNodeType and FailureNodeLocation will be populated.
+ // FailureNodeInContainer means the failure occurred in a non-leaf node that is defined within a container. FailureNodeType, FailureNodeLocation, and FailureNodeContainerIndex will be populated.
+ //
+ // FailureNodeType will contain the NodeType of the node in which the failure occurred.
+ // FailureNodeLocation will contain the CodeLocation of the node in which the failure occurred.
+ // If populated, FailureNodeContainerIndex will be the index into SpecReport.ContainerHierarchyTexts and SpecReport.ContainerHierarchyLocations that represents the parent container of the node in which the failure occurred.
+ FailureNodeContext FailureNodeContext `json:",omitempty"`
+
+ FailureNodeType NodeType `json:",omitempty"`
+
+ FailureNodeLocation CodeLocation `json:",omitempty"`
+
+ FailureNodeContainerIndex int `json:",omitempty"`
+
+	// ProgressReport is populated if the spec was interrupted or timed out
+ ProgressReport ProgressReport `json:",omitempty"`
+
+	// AdditionalFailure is non-nil if a follow-on failure occurred within the same node after the primary failure. This only happens when a node has timed out or been interrupted. In such cases the AdditionalFailure can include information about where/why the spec was stuck.
+ AdditionalFailure *AdditionalFailure `json:",omitempty"`
+}
+
+func (f Failure) IsZero() bool {
+ return f.Message == "" && (f.Location == CodeLocation{})
+}
+
+func (f Failure) GetTimelineLocation() TimelineLocation {
+ return f.TimelineLocation
+}
+
+// FailureNodeContext captures the location context for the node containing the failing line of code
+type FailureNodeContext uint
+
+const (
+ FailureNodeContextInvalid FailureNodeContext = iota
+
+ FailureNodeIsLeafNode
+ FailureNodeAtTopLevel
+ FailureNodeInContainer
+)
+
+var fncEnumSupport = NewEnumSupport(map[uint]string{
+ uint(FailureNodeContextInvalid): "INVALID FAILURE NODE CONTEXT",
+ uint(FailureNodeIsLeafNode): "leaf-node",
+ uint(FailureNodeAtTopLevel): "top-level",
+ uint(FailureNodeInContainer): "in-container",
+})
+
+func (fnc FailureNodeContext) String() string {
+ return fncEnumSupport.String(uint(fnc))
+}
+func (fnc *FailureNodeContext) UnmarshalJSON(b []byte) error {
+ out, err := fncEnumSupport.UnmarshJSON(b)
+ *fnc = FailureNodeContext(out)
+ return err
+}
+func (fnc FailureNodeContext) MarshalJSON() ([]byte, error) {
+ return fncEnumSupport.MarshJSON(uint(fnc))
+}
+
+// AdditionalFailure captures any additional failures that occur after the initial failure of a spec;
+// these typically occur in cleanup nodes after the spec has failed.
+// We can't simply use Failure as we want to track the SpecState to know what kind of failure this is
+type AdditionalFailure struct {
+ State SpecState
+ Failure Failure
+}
+
+func (f AdditionalFailure) GetTimelineLocation() TimelineLocation {
+ return f.Failure.TimelineLocation
+}
+
+// SpecState captures the state of a spec
+// To determine if a given `state` represents a failure state, use `state.Is(SpecStateFailureStates)`
+type SpecState uint
+
+const (
+ SpecStateInvalid SpecState = 0
+
+ SpecStatePending SpecState = 1 << iota
+ SpecStateSkipped
+ SpecStatePassed
+ SpecStateFailed
+ SpecStateAborted
+ SpecStatePanicked
+ SpecStateInterrupted
+ SpecStateTimedout
+)
+
+var ssEnumSupport = NewEnumSupport(map[uint]string{
+ uint(SpecStateInvalid): "INVALID SPEC STATE",
+ uint(SpecStatePending): "pending",
+ uint(SpecStateSkipped): "skipped",
+ uint(SpecStatePassed): "passed",
+ uint(SpecStateFailed): "failed",
+ uint(SpecStateAborted): "aborted",
+ uint(SpecStatePanicked): "panicked",
+ uint(SpecStateInterrupted): "interrupted",
+ uint(SpecStateTimedout): "timedout",
+})
+
+func (ss SpecState) String() string {
+ return ssEnumSupport.String(uint(ss))
+}
+func (ss SpecState) GomegaString() string {
+ return ssEnumSupport.String(uint(ss))
+}
+func (ss *SpecState) UnmarshalJSON(b []byte) error {
+ out, err := ssEnumSupport.UnmarshJSON(b)
+ *ss = SpecState(out)
+ return err
+}
+func (ss SpecState) MarshalJSON() ([]byte, error) {
+ return ssEnumSupport.MarshJSON(uint(ss))
+}
+
+var SpecStateFailureStates = SpecStateFailed | SpecStateTimedout | SpecStateAborted | SpecStatePanicked | SpecStateInterrupted
+
+func (ss SpecState) Is(states SpecState) bool {
+ return ss&states != 0
+}
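+
+// Because each SpecState is a distinct bit, membership tests compose with
+// bitwise OR, e.g.:
+//
+//	SpecStateFailed.Is(SpecStateFailureStates)            // true
+//	SpecStateFailed.Is(SpecStateFailed | SpecStatePassed) // true
+//	SpecStatePassed.Is(SpecStateFailureStates)            // false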
+
+// ProgressReport captures the progress of the current spec. It is, effectively, a structured Ginkgo-aware stack trace
+type ProgressReport struct {
+ Message string `json:",omitempty"`
+ ParallelProcess int `json:",omitempty"`
+ RunningInParallel bool `json:",omitempty"`
+
+ ContainerHierarchyTexts []string `json:",omitempty"`
+ LeafNodeText string `json:",omitempty"`
+ LeafNodeLocation CodeLocation `json:",omitempty"`
+ SpecStartTime time.Time `json:",omitempty"`
+
+ CurrentNodeType NodeType `json:",omitempty"`
+ CurrentNodeText string `json:",omitempty"`
+ CurrentNodeLocation CodeLocation `json:",omitempty"`
+ CurrentNodeStartTime time.Time `json:",omitempty"`
+
+ CurrentStepText string `json:",omitempty"`
+ CurrentStepLocation CodeLocation `json:",omitempty"`
+ CurrentStepStartTime time.Time `json:",omitempty"`
+
+ AdditionalReports []string `json:",omitempty"`
+
+ CapturedGinkgoWriterOutput string `json:",omitempty"`
+ TimelineLocation TimelineLocation `json:",omitempty"`
+
+ Goroutines []Goroutine `json:",omitempty"`
+}
+
+func (pr ProgressReport) IsZero() bool {
+ return pr.CurrentNodeType == NodeTypeInvalid
+}
+
+func (pr ProgressReport) Time() time.Time {
+ return pr.TimelineLocation.Time
+}
+
+func (pr ProgressReport) SpecGoroutine() Goroutine {
+ for _, goroutine := range pr.Goroutines {
+ if goroutine.IsSpecGoroutine {
+ return goroutine
+ }
+ }
+ return Goroutine{}
+}
+
+func (pr ProgressReport) HighlightedGoroutines() []Goroutine {
+ out := []Goroutine{}
+ for _, goroutine := range pr.Goroutines {
+ if goroutine.IsSpecGoroutine || !goroutine.HasHighlights() {
+ continue
+ }
+ out = append(out, goroutine)
+ }
+ return out
+}
+
+func (pr ProgressReport) OtherGoroutines() []Goroutine {
+ out := []Goroutine{}
+ for _, goroutine := range pr.Goroutines {
+ if goroutine.IsSpecGoroutine || goroutine.HasHighlights() {
+ continue
+ }
+ out = append(out, goroutine)
+ }
+ return out
+}
+
+func (pr ProgressReport) WithoutCapturedGinkgoWriterOutput() ProgressReport {
+ out := pr
+ out.CapturedGinkgoWriterOutput = ""
+ return out
+}
+
+func (pr ProgressReport) WithoutOtherGoroutines() ProgressReport {
+ out := pr
+ filteredGoroutines := []Goroutine{}
+ for _, goroutine := range pr.Goroutines {
+ if goroutine.IsSpecGoroutine || goroutine.HasHighlights() {
+ filteredGoroutines = append(filteredGoroutines, goroutine)
+ }
+ }
+ out.Goroutines = filteredGoroutines
+ return out
+}
+
+func (pr ProgressReport) GetTimelineLocation() TimelineLocation {
+ return pr.TimelineLocation
+}
+
+type Goroutine struct {
+ ID uint64
+ State string
+ Stack []FunctionCall
+ IsSpecGoroutine bool
+}
+
+func (g Goroutine) IsZero() bool {
+ return g.ID == 0
+}
+
+func (g Goroutine) HasHighlights() bool {
+ for _, fc := range g.Stack {
+ if fc.Highlight {
+ return true
+ }
+ }
+
+ return false
+}
+
+type FunctionCall struct {
+ Function string
+ Filename string
+ Line int
+ Highlight bool `json:",omitempty"`
+ Source []string `json:",omitempty"`
+ SourceHighlight int `json:",omitempty"`
+}
+
+// NodeType captures the type of a given Ginkgo Node
+type NodeType uint
+
+const (
+ NodeTypeInvalid NodeType = 0
+
+ NodeTypeContainer NodeType = 1 << iota
+ NodeTypeIt
+
+ NodeTypeBeforeEach
+ NodeTypeJustBeforeEach
+ NodeTypeAfterEach
+ NodeTypeJustAfterEach
+
+ NodeTypeBeforeAll
+ NodeTypeAfterAll
+
+ NodeTypeBeforeSuite
+ NodeTypeSynchronizedBeforeSuite
+ NodeTypeAfterSuite
+ NodeTypeSynchronizedAfterSuite
+
+ NodeTypeReportBeforeEach
+ NodeTypeReportAfterEach
+ NodeTypeReportBeforeSuite
+ NodeTypeReportAfterSuite
+
+ NodeTypeCleanupInvalid
+ NodeTypeCleanupAfterEach
+ NodeTypeCleanupAfterAll
+ NodeTypeCleanupAfterSuite
+)
+
+var NodeTypesForContainerAndIt = NodeTypeContainer | NodeTypeIt
+var NodeTypesForSuiteLevelNodes = NodeTypeBeforeSuite | NodeTypeSynchronizedBeforeSuite | NodeTypeAfterSuite | NodeTypeSynchronizedAfterSuite | NodeTypeReportBeforeSuite | NodeTypeReportAfterSuite | NodeTypeCleanupAfterSuite
+var NodeTypesAllowedDuringCleanupInterrupt = NodeTypeAfterEach | NodeTypeJustAfterEach | NodeTypeAfterAll | NodeTypeAfterSuite | NodeTypeSynchronizedAfterSuite | NodeTypeCleanupAfterEach | NodeTypeCleanupAfterAll | NodeTypeCleanupAfterSuite
+var NodeTypesAllowedDuringReportInterrupt = NodeTypeReportBeforeEach | NodeTypeReportAfterEach | NodeTypeReportBeforeSuite | NodeTypeReportAfterSuite
+
+var ntEnumSupport = NewEnumSupport(map[uint]string{
+ uint(NodeTypeInvalid): "INVALID NODE TYPE",
+ uint(NodeTypeContainer): "Container",
+ uint(NodeTypeIt): "It",
+ uint(NodeTypeBeforeEach): "BeforeEach",
+ uint(NodeTypeJustBeforeEach): "JustBeforeEach",
+ uint(NodeTypeAfterEach): "AfterEach",
+ uint(NodeTypeJustAfterEach): "JustAfterEach",
+ uint(NodeTypeBeforeAll): "BeforeAll",
+ uint(NodeTypeAfterAll): "AfterAll",
+ uint(NodeTypeBeforeSuite): "BeforeSuite",
+ uint(NodeTypeSynchronizedBeforeSuite): "SynchronizedBeforeSuite",
+ uint(NodeTypeAfterSuite): "AfterSuite",
+ uint(NodeTypeSynchronizedAfterSuite): "SynchronizedAfterSuite",
+ uint(NodeTypeReportBeforeEach): "ReportBeforeEach",
+ uint(NodeTypeReportAfterEach): "ReportAfterEach",
+ uint(NodeTypeReportBeforeSuite): "ReportBeforeSuite",
+ uint(NodeTypeReportAfterSuite): "ReportAfterSuite",
+ uint(NodeTypeCleanupInvalid): "DeferCleanup",
+ uint(NodeTypeCleanupAfterEach): "DeferCleanup (Each)",
+ uint(NodeTypeCleanupAfterAll): "DeferCleanup (All)",
+ uint(NodeTypeCleanupAfterSuite): "DeferCleanup (Suite)",
+})
+
+func (nt NodeType) String() string {
+ return ntEnumSupport.String(uint(nt))
+}
+func (nt *NodeType) UnmarshalJSON(b []byte) error {
+ out, err := ntEnumSupport.UnmarshJSON(b)
+ *nt = NodeType(out)
+ return err
+}
+func (nt NodeType) MarshalJSON() ([]byte, error) {
+ return ntEnumSupport.MarshJSON(uint(nt))
+}
+
+func (nt NodeType) Is(nodeTypes NodeType) bool {
+ return nt&nodeTypes != 0
+}
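+
+// The same bit-flag pattern drives report filtering; e.g. a sketch splitting a
+// SpecReports slice (reports is an assumed variable):
+//
+//	itReports := reports.WithLeafNodeType(NodeTypeIt)
+//	suiteReports := reports.WithLeafNodeType(NodeTypesForSuiteLevelNodes)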
+
+/*
+SpecEvent captures a variety of events that can occur when specs run. See SpecEventType for the list of available events.
+*/
+type SpecEvent struct {
+ SpecEventType SpecEventType
+
+ CodeLocation CodeLocation
+ TimelineLocation TimelineLocation
+
+ Message string `json:",omitempty"`
+ Duration time.Duration `json:",omitempty"`
+ NodeType NodeType `json:",omitempty"`
+ Attempt int `json:",omitempty"`
+}
+
+func (se SpecEvent) GetTimelineLocation() TimelineLocation {
+ return se.TimelineLocation
+}
+
+func (se SpecEvent) IsOnlyVisibleAtVeryVerbose() bool {
+ return se.SpecEventType.Is(SpecEventByEnd | SpecEventNodeStart | SpecEventNodeEnd)
+}
+
+func (se SpecEvent) GomegaString() string {
+ out := &strings.Builder{}
+ out.WriteString("[" + se.SpecEventType.String() + " SpecEvent] ")
+ if se.Message != "" {
+ out.WriteString("Message=")
+ out.WriteString(`"` + se.Message + `",`)
+ }
+ if se.Duration != 0 {
+ out.WriteString("Duration=" + se.Duration.String() + ",")
+ }
+ if se.NodeType != NodeTypeInvalid {
+ out.WriteString("NodeType=" + se.NodeType.String() + ",")
+ }
+ if se.Attempt != 0 {
+ out.WriteString(fmt.Sprintf("Attempt=%d", se.Attempt) + ",")
+ }
+ out.WriteString("CL=" + se.CodeLocation.String() + ",")
+ out.WriteString(fmt.Sprintf("TL.Offset=%d", se.TimelineLocation.Offset))
+
+ return out.String()
+}
+
+type SpecEvents []SpecEvent
+
+func (se SpecEvents) WithType(seType SpecEventType) SpecEvents {
+ out := SpecEvents{}
+ for _, event := range se {
+ if event.SpecEventType.Is(seType) {
+ out = append(out, event)
+ }
+ }
+ return out
+}
+
+type SpecEventType uint
+
+const (
+ SpecEventInvalid SpecEventType = 0
+
+ SpecEventByStart SpecEventType = 1 << iota
+ SpecEventByEnd
+ SpecEventNodeStart
+ SpecEventNodeEnd
+ SpecEventSpecRepeat
+ SpecEventSpecRetry
+)
+
+var seEnumSupport = NewEnumSupport(map[uint]string{
+ uint(SpecEventInvalid): "INVALID SPEC EVENT",
+ uint(SpecEventByStart): "By",
+ uint(SpecEventByEnd): "By (End)",
+ uint(SpecEventNodeStart): "Node",
+ uint(SpecEventNodeEnd): "Node (End)",
+ uint(SpecEventSpecRepeat): "Repeat",
+ uint(SpecEventSpecRetry): "Retry",
+})
+
+func (se SpecEventType) String() string {
+ return seEnumSupport.String(uint(se))
+}
+func (se *SpecEventType) UnmarshalJSON(b []byte) error {
+ out, err := seEnumSupport.UnmarshJSON(b)
+ *se = SpecEventType(out)
+ return err
+}
+func (se SpecEventType) MarshalJSON() ([]byte, error) {
+ return seEnumSupport.MarshJSON(uint(se))
+}
+
+func (se SpecEventType) Is(specEventTypes SpecEventType) bool {
+ return se&specEventTypes != 0
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/types_patch.go b/vendor/github.com/onsi/ginkgo/v2/types/types_patch.go
new file mode 100644
index 000000000..02d319bba
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/types/types_patch.go
@@ -0,0 +1,8 @@
+package types
+
+type TestSpec interface {
+ CodeLocations() []CodeLocation
+ Text() string
+ AppendText(text string)
+ Labels() []string
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/version.go b/vendor/github.com/onsi/ginkgo/v2/types/version.go
new file mode 100644
index 000000000..caf3c9f5e
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/types/version.go
@@ -0,0 +1,3 @@
+package types
+
+const VERSION = "2.21.0"
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/LICENSE b/vendor/github.com/openshift-eng/openshift-tests-extension/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/vendor/github.com/openshift-eng/openshift-tests-extension/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmd.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmd.go
new file mode 100644
index 000000000..2db8cfa6e
--- /dev/null
+++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmd.go
@@ -0,0 +1,23 @@
+package cmd
+
+import (
+ "github.com/spf13/cobra"
+
+ "github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdimages"
+ "github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdinfo"
+ "github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdlist"
+ "github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdrun"
+ "github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdupdate"
+ "github.com/openshift-eng/openshift-tests-extension/pkg/extension"
+)
+
+func DefaultExtensionCommands(registry *extension.Registry) []*cobra.Command {
+ return []*cobra.Command{
+ cmdrun.NewRunSuiteCommand(registry),
+ cmdrun.NewRunTestCommand(registry),
+ cmdlist.NewListCommand(registry),
+ cmdinfo.NewInfoCommand(registry),
+ cmdupdate.NewUpdateCommand(registry),
+ cmdimages.NewImagesCommand(registry),
+ }
+}
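+
+// Sketch of the typical wiring in an extension binary's main(); the registry
+// constructor, Register method, and component/binary names are assumptions
+// here, not part of this file:
+//
+//	registry := extension.NewRegistry()
+//	ext := extension.NewExtension("openshift", "payload", "my-component")
+//	registry.Register(ext)
+//	root := &cobra.Command{Use: "my-tests"}
+//	root.AddCommand(cmd.DefaultExtensionCommands(registry)...)
+//	if err := root.Execute(); err != nil {
+//		os.Exit(1)
+//	}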
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdimages/cmdimages.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdimages/cmdimages.go
new file mode 100644
index 000000000..33b458fac
--- /dev/null
+++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdimages/cmdimages.go
@@ -0,0 +1,36 @@
+package cmdimages
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+
+ "github.com/spf13/cobra"
+
+ "github.com/openshift-eng/openshift-tests-extension/pkg/extension"
+ "github.com/openshift-eng/openshift-tests-extension/pkg/flags"
+)
+
+func NewImagesCommand(registry *extension.Registry) *cobra.Command {
+ componentFlags := flags.NewComponentFlags()
+
+ cmd := &cobra.Command{
+ Use: "images",
+ Short: "List test images",
+ SilenceUsage: true,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ extension := registry.Get(componentFlags.Component)
+ if extension == nil {
+ return fmt.Errorf("couldn't find the component %q", componentFlags.Component)
+ }
+ images, err := json.Marshal(extension.Images)
+ if err != nil {
+ return err
+ }
+ fmt.Fprintf(os.Stdout, "%s\n", images)
+ return nil
+ },
+ }
+ componentFlags.BindFlags(cmd.Flags())
+ return cmd
+}
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdinfo/info.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdinfo/info.go
new file mode 100644
index 000000000..1d4237876
--- /dev/null
+++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdinfo/info.go
@@ -0,0 +1,38 @@
+package cmdinfo
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+
+ "github.com/spf13/cobra"
+
+ "github.com/openshift-eng/openshift-tests-extension/pkg/extension"
+ "github.com/openshift-eng/openshift-tests-extension/pkg/flags"
+)
+
+func NewInfoCommand(registry *extension.Registry) *cobra.Command {
+ componentFlags := flags.NewComponentFlags()
+
+ cmd := &cobra.Command{
+ Use: "info",
+ Short: "Display extension metadata",
+ SilenceUsage: true,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ extension := registry.Get(componentFlags.Component)
+ if extension == nil {
+ return fmt.Errorf("couldn't find the component %q", componentFlags.Component)
+ }
+
+ info, err := json.MarshalIndent(extension, "", " ")
+ if err != nil {
+ return err
+ }
+
+ fmt.Fprintf(os.Stdout, "%s\n", string(info))
+ return nil
+ },
+ }
+ componentFlags.BindFlags(cmd.Flags())
+ return cmd
+}
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdlist/list.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdlist/list.go
new file mode 100644
index 000000000..31a040b7c
--- /dev/null
+++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdlist/list.go
@@ -0,0 +1,133 @@
+package cmdlist
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/spf13/cobra"
+
+ "github.com/openshift-eng/openshift-tests-extension/pkg/extension"
+ "github.com/openshift-eng/openshift-tests-extension/pkg/flags"
+)
+
+func NewListCommand(registry *extension.Registry) *cobra.Command {
+ opts := struct {
+ componentFlags *flags.ComponentFlags
+ suiteFlags *flags.SuiteFlags
+ outputFlags *flags.OutputFlags
+ environmentalFlags *flags.EnvironmentalFlags
+ }{
+ suiteFlags: flags.NewSuiteFlags(),
+ componentFlags: flags.NewComponentFlags(),
+ outputFlags: flags.NewOutputFlags(),
+ environmentalFlags: flags.NewEnvironmentalFlags(),
+ }
+
+ // Tests
+ listTestsCmd := &cobra.Command{
+ Use: "tests",
+ Short: "List available tests",
+ SilenceUsage: true,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ ext := registry.Get(opts.componentFlags.Component)
+ if ext == nil {
+ return fmt.Errorf("component not found: %s", opts.componentFlags.Component)
+ }
+
+ // Find suite, if specified
+ var foundSuite *extension.Suite
+ var err error
+ if opts.suiteFlags.Suite != "" {
+ foundSuite, err = ext.GetSuite(opts.suiteFlags.Suite)
+ if err != nil {
+ return err
+ }
+ }
+
+ // Filter for suite
+ specs := ext.GetSpecs()
+ if foundSuite != nil {
+ specs, err = specs.Filter(foundSuite.Qualifiers)
+ if err != nil {
+ return err
+ }
+ }
+
+ specs, err = specs.FilterByEnvironment(*opts.environmentalFlags)
+ if err != nil {
+ return err
+ }
+
+ data, err := opts.outputFlags.Marshal(specs)
+ if err != nil {
+ return err
+ }
+ fmt.Fprintf(os.Stdout, "%s\n", string(data))
+ return nil
+ },
+ }
+ opts.suiteFlags.BindFlags(listTestsCmd.Flags())
+ opts.componentFlags.BindFlags(listTestsCmd.Flags())
+ opts.environmentalFlags.BindFlags(listTestsCmd.Flags())
+ opts.outputFlags.BindFlags(listTestsCmd.Flags())
+
+ // Suites
+ listSuitesCommand := &cobra.Command{
+ Use: "suites",
+ Short: "List available suites",
+ SilenceUsage: true,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ ext := registry.Get(opts.componentFlags.Component)
+ if ext == nil {
+ return fmt.Errorf("component not found: %s", opts.componentFlags.Component)
+ }
+
+ suites := ext.Suites
+
+ data, err := opts.outputFlags.Marshal(suites)
+ if err != nil {
+ return err
+ }
+ fmt.Fprintf(os.Stdout, "%s\n", string(data))
+ return nil
+ },
+ }
+ opts.componentFlags.BindFlags(listSuitesCommand.Flags())
+ opts.outputFlags.BindFlags(listSuitesCommand.Flags())
+
+ // Components
+ listComponentsCmd := &cobra.Command{
+ Use: "components",
+ Short: "List available components",
+ SilenceUsage: true,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ var components []*extension.Component
+ registry.Walk(func(e *extension.Extension) {
+ components = append(components, &e.Component)
+ })
+
+ data, err := opts.outputFlags.Marshal(components)
+ if err != nil {
+ return err
+ }
+ fmt.Fprintf(os.Stdout, "%s\n", string(data))
+ return nil
+ },
+ }
+ opts.outputFlags.BindFlags(listComponentsCmd.Flags())
+
+ var listCmd = &cobra.Command{
+ Use: "list [subcommand]",
+ Short: "List items",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return listTestsCmd.RunE(cmd, args)
+ },
+ }
+ opts.suiteFlags.BindFlags(listCmd.Flags())
+ opts.componentFlags.BindFlags(listCmd.Flags())
+ opts.outputFlags.BindFlags(listCmd.Flags())
+ opts.environmentalFlags.BindFlags(listCmd.Flags())
+ listCmd.AddCommand(listTestsCmd, listComponentsCmd, listSuitesCommand)
+
+ return listCmd
+}
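+
+// Example invocations (sketches; the binary and suite names are placeholders):
+//
+//	my-extension-binary list suites
+//	my-extension-binary list tests --suite my-suite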
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdrun/runsuite.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdrun/runsuite.go
new file mode 100644
index 000000000..998fd7040
--- /dev/null
+++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdrun/runsuite.go
@@ -0,0 +1,153 @@
+package cmdrun
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "os"
+ "os/signal"
+ "path/filepath"
+ "syscall"
+ "time"
+
+ "github.com/pkg/errors"
+ "github.com/spf13/cobra"
+
+ "github.com/openshift-eng/openshift-tests-extension/pkg/extension"
+ "github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests"
+ "github.com/openshift-eng/openshift-tests-extension/pkg/flags"
+)
+
+func NewRunSuiteCommand(registry *extension.Registry) *cobra.Command {
+ opts := struct {
+ componentFlags *flags.ComponentFlags
+ outputFlags *flags.OutputFlags
+ concurrencyFlags *flags.ConcurrencyFlags
+ junitPath string
+ htmlPath string
+ }{
+ componentFlags: flags.NewComponentFlags(),
+ outputFlags: flags.NewOutputFlags(),
+ concurrencyFlags: flags.NewConcurrencyFlags(),
+ junitPath: "",
+ htmlPath: "",
+ }
+
+ cmd := &cobra.Command{
+ Use: "run-suite NAME",
+ Short: "Run a group of tests by suite. This is more limited than origin, and intended for light local " +
+			"development use. Orchestration parameters, scheduling, isolation, etc. are not obeyed, and Ginkgo tests are executed serially.",
+ SilenceUsage: true,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ ctx, cancelCause := context.WithCancelCause(context.Background())
+ defer cancelCause(errors.New("exiting"))
+
+ abortCh := make(chan os.Signal, 2)
+ go func() {
+ <-abortCh
+ fmt.Fprintf(os.Stderr, "Interrupted, terminating tests")
+ cancelCause(errors.New("interrupt received"))
+
+ select {
+ case sig := <-abortCh:
+ fmt.Fprintf(os.Stderr, "Interrupted twice, exiting (%s)", sig)
+ switch sig {
+ case syscall.SIGINT:
+ os.Exit(130)
+ default:
+ os.Exit(130) // if we were interrupted, never return zero.
+ }
+
+ case <-time.After(30 * time.Minute): // allow time for cleanup. If we finish before this, we'll exit
+ fmt.Fprintf(os.Stderr, "Timed out during cleanup, exiting")
+ os.Exit(130) // if we were interrupted, never return zero.
+ }
+ }()
+ signal.Notify(abortCh, syscall.SIGINT, syscall.SIGTERM)
+
+ ext := registry.Get(opts.componentFlags.Component)
+ if ext == nil {
+ return fmt.Errorf("component not found: %s", opts.componentFlags.Component)
+ }
+ if len(args) != 1 {
+ return fmt.Errorf("must specify one suite name")
+ }
+ suite, err := ext.GetSuite(args[0])
+ if err != nil {
+ return errors.Wrapf(err, "couldn't find suite: %s", args[0])
+ }
+
+ compositeWriter := extensiontests.NewCompositeResultWriter()
+ defer func() {
+ if err = compositeWriter.Flush(); err != nil {
+ fmt.Fprintf(os.Stderr, "failed to write results: %v\n", err)
+ }
+ }()
+
+ // JUnit writer if needed
+ if opts.junitPath != "" {
+ junitWriter, err := extensiontests.NewJUnitResultWriter(opts.junitPath, suite.Name)
+ if err != nil {
+ return errors.Wrap(err, "couldn't create junit writer")
+ }
+ compositeWriter.AddWriter(junitWriter)
+ }
+ // HTML writer if needed
+ if opts.htmlPath != "" {
+ htmlWriter, err := extensiontests.NewHTMLResultWriter(opts.htmlPath, suite.Name)
+ if err != nil {
+ return errors.Wrap(err, "couldn't create html writer")
+ }
+ compositeWriter.AddWriter(htmlWriter)
+ }
+
+ // JSON writer
+ jsonWriter, err := extensiontests.NewJSONResultWriter(os.Stdout,
+ extensiontests.ResultFormat(opts.outputFlags.Output))
+ if err != nil {
+ return err
+ }
+ compositeWriter.AddWriter(jsonWriter)
+
+ specs, err := ext.GetSpecs().Filter(suite.Qualifiers)
+ if err != nil {
+ return errors.Wrap(err, "couldn't filter specs")
+ }
+
+ concurrency := opts.concurrencyFlags.MaxConcurency
+ if suite.Parallelism > 0 {
+ concurrency = min(concurrency, suite.Parallelism)
+ }
+ results, runErr := specs.Run(ctx, compositeWriter, concurrency)
+ if opts.junitPath != "" {
+ // we want to commit the results to disk regardless of the success or failure of the specs
+ if err := writeResults(opts.junitPath, results); err != nil {
+ fmt.Fprintf(os.Stderr, "Failed to write test results to disk: %v\n", err)
+ }
+ }
+ return runErr
+ },
+ }
+ opts.componentFlags.BindFlags(cmd.Flags())
+ opts.outputFlags.BindFlags(cmd.Flags())
+ opts.concurrencyFlags.BindFlags(cmd.Flags())
+ cmd.Flags().StringVarP(&opts.junitPath, "junit-path", "j", opts.junitPath, "write results to junit XML")
+ cmd.Flags().StringVar(&opts.htmlPath, "html-path", opts.htmlPath, "write results to summary HTML")
+
+ return cmd
+}
+
+func writeResults(jUnitPath string, results []*extensiontests.ExtensionTestResult) error {
+ jUnitDir := filepath.Dir(jUnitPath)
+ if err := os.MkdirAll(jUnitDir, 0755); err != nil {
+ return fmt.Errorf("failed to create output directory: %v", err)
+ }
+
+ encodedResults, err := json.MarshalIndent(results, "", " ")
+ if err != nil {
+ return fmt.Errorf("failed to marshal results: %v", err)
+ }
+
+ outputPath := filepath.Join(jUnitDir, fmt.Sprintf("extension_test_result_e2e_%s.json", time.Now().UTC().Format("20060102-150405")))
+ return os.WriteFile(outputPath, encodedResults, 0644)
+}
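+
+// Example invocation (a sketch; the binary and suite names are placeholders):
+//
+//	my-extension-binary run-suite my-suite --junit-path /tmp/junit.xml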
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdrun/runtest.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdrun/runtest.go
new file mode 100644
index 000000000..c62021e7e
--- /dev/null
+++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdrun/runtest.go
@@ -0,0 +1,113 @@
+package cmdrun
+
+import (
+ "bufio"
+ "context"
+ "errors"
+ "fmt"
+ "os"
+ "os/signal"
+ "syscall"
+ "time"
+
+ "github.com/spf13/cobra"
+
+ "github.com/openshift-eng/openshift-tests-extension/pkg/extension"
+ "github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests"
+ "github.com/openshift-eng/openshift-tests-extension/pkg/flags"
+)
+
+func NewRunTestCommand(registry *extension.Registry) *cobra.Command {
+ opts := struct {
+ componentFlags *flags.ComponentFlags
+ concurrencyFlags *flags.ConcurrencyFlags
+ nameFlags *flags.NamesFlags
+ outputFlags *flags.OutputFlags
+ }{
+ componentFlags: flags.NewComponentFlags(),
+ nameFlags: flags.NewNamesFlags(),
+ outputFlags: flags.NewOutputFlags(),
+ concurrencyFlags: flags.NewConcurrencyFlags(),
+ }
+
+ cmd := &cobra.Command{
+ Use: "run-test [-n NAME...] [NAME]",
+ Short: "Runs tests by name",
+ SilenceUsage: true,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ ctx, cancelCause := context.WithCancelCause(context.Background())
+ defer cancelCause(errors.New("exiting"))
+
+ abortCh := make(chan os.Signal, 2)
+ go func() {
+ <-abortCh
+ fmt.Fprintf(os.Stderr, "Interrupted, terminating tests")
+ cancelCause(errors.New("interrupt received"))
+
+ select {
+ case sig := <-abortCh:
+ fmt.Fprintf(os.Stderr, "Interrupted twice, exiting (%s)", sig)
+ switch sig {
+ case syscall.SIGINT:
+ os.Exit(130)
+ default:
+ os.Exit(130) // if we were interrupted, never return zero.
+ }
+
+ case <-time.After(30 * time.Minute): // allow time for cleanup. If we finish before this, we'll exit
+ fmt.Fprintf(os.Stderr, "Timed out during cleanup, exiting")
+ os.Exit(130) // if we were interrupted, never return zero.
+ }
+ }()
+ signal.Notify(abortCh, syscall.SIGINT, syscall.SIGTERM)
+
+ ext := registry.Get(opts.componentFlags.Component)
+ if ext == nil {
+ return fmt.Errorf("component not found: %s", opts.componentFlags.Component)
+ }
+ if len(args) > 1 {
+ return fmt.Errorf("use --names to specify more than one test")
+ }
+ opts.nameFlags.Names = append(opts.nameFlags.Names, args...)
+
+			// allow reading tests from a stdin pipe
+ info, err := os.Stdin.Stat()
+ if err != nil {
+ return err
+ }
+ if info.Mode()&os.ModeCharDevice == 0 { // Check if input is from a pipe
+ scanner := bufio.NewScanner(os.Stdin)
+ for scanner.Scan() {
+ opts.nameFlags.Names = append(opts.nameFlags.Names, scanner.Text())
+ }
+ if err := scanner.Err(); err != nil {
+ return fmt.Errorf("error reading from stdin: %v", err)
+ }
+ }
+
+ if len(opts.nameFlags.Names) == 0 {
+ return fmt.Errorf("must specify at least one test")
+ }
+
+ specs, err := ext.FindSpecsByName(opts.nameFlags.Names...)
+ if err != nil {
+ return err
+ }
+
+ w, err := extensiontests.NewJSONResultWriter(os.Stdout, extensiontests.ResultFormat(opts.outputFlags.Output))
+ if err != nil {
+ return err
+ }
+ defer w.Flush()
+
+ _, err = specs.Run(ctx, w, opts.concurrencyFlags.MaxConcurency)
+ return err
+ },
+ }
+ opts.componentFlags.BindFlags(cmd.Flags())
+ opts.nameFlags.BindFlags(cmd.Flags())
+ opts.outputFlags.BindFlags(cmd.Flags())
+ opts.concurrencyFlags.BindFlags(cmd.Flags())
+
+ return cmd
+}
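+
+// Example invocations (sketches; the binary and test names are placeholders).
+// The second form exercises the stdin-pipe path above:
+//
+//	my-extension-binary run-test "my test name"
+//	printf '%s\n' "test one" "test two" | my-extension-binary run-test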
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdupdate/update.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdupdate/update.go
new file mode 100644
index 000000000..5d847308e
--- /dev/null
+++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdupdate/update.go
@@ -0,0 +1,84 @@
+package cmdupdate
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/spf13/cobra"
+
+ "github.com/openshift-eng/openshift-tests-extension/pkg/extension"
+ "github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests"
+ "github.com/openshift-eng/openshift-tests-extension/pkg/flags"
+)
+
+const metadataDirectory = ".openshift-tests-extension"
+
+// NewUpdateCommand adds an "update" command used to generate and verify the metadata we keep track of. This should
+// be a black box to end users, i.e. we can add more criteria later that they'll consume when revendoring. For now,
+// we prevent a test from being renamed without recording its original name, and from being deleted outright.
+func NewUpdateCommand(registry *extension.Registry) *cobra.Command {
+ componentFlags := flags.NewComponentFlags()
+
+ cmd := &cobra.Command{
+ Use: "update",
+ Short: "Update test metadata",
+ SilenceUsage: true,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ ext := registry.Get(componentFlags.Component)
+ if ext == nil {
+ return fmt.Errorf("couldn't find the component %q", componentFlags.Component)
+ }
+
+ // Create the metadata directory if it doesn't exist
+ if err := os.MkdirAll(metadataDirectory, 0755); err != nil {
+ return fmt.Errorf("failed to create directory %s: %w", metadataDirectory, err)
+ }
+
+ // Read existing specs
+ metadataPath := filepath.Join(metadataDirectory, fmt.Sprintf("%s.json", strings.ReplaceAll(ext.Component.Identifier(), ":", "_")))
+ var oldSpecs extensiontests.ExtensionTestSpecs
+ source, err := os.Open(metadataPath)
+ if err != nil {
+ if !os.IsNotExist(err) {
+ return fmt.Errorf("failed to open file: %s: %+w", metadataPath, err)
+ }
+ } else {
+ if err := json.NewDecoder(source).Decode(&oldSpecs); err != nil {
+ return fmt.Errorf("failed to decode file: %s: %+w", metadataPath, err)
+ }
+
+ missing, err := ext.FindRemovedTestsWithoutRename(oldSpecs)
+ if err != nil && len(missing) > 0 {
+ fmt.Fprintf(os.Stderr, "Missing Tests:\n")
+ for _, name := range missing {
+ fmt.Fprintf(os.Stdout, " * %s\n", name)
+ }
+ fmt.Fprintf(os.Stderr, "\n")
+
+ return fmt.Errorf("missing tests, if you've renamed tests you must add their names to OriginalName, " +
+ "or mark them obsolete")
+ }
+ }
+
+ // no missing tests, write the results
+ newSpecs := ext.GetSpecs()
+ data, err := json.MarshalIndent(newSpecs, "", " ")
+ if err != nil {
+ return fmt.Errorf("failed to marshal specs to JSON: %w", err)
+ }
+
+ // Write the JSON data to the file
+ if err := os.WriteFile(metadataPath, data, 0644); err != nil {
+ return fmt.Errorf("failed to write file %s: %w", metadataPath, err)
+ }
+
+ fmt.Printf("successfully updated metadata\n")
+ return nil
+ },
+ }
+ componentFlags.BindFlags(cmd.Flags())
+ return cmd
+}
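+
+// Workflow sketch: running the update command writes the current spec list to
+// .openshift-tests-extension/<product>_<kind>_<name>.json; committing that file
+// lets a later run detect deleted or renamed tests and fail until the rename is
+// recorded via OriginalName, or the test is marked obsolete.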
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/dbtime/time.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/dbtime/time.go
new file mode 100644
index 000000000..b7651ba02
--- /dev/null
+++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/dbtime/time.go
@@ -0,0 +1,26 @@
+package dbtime
+
+import "time"
+
+// DBTime is a type suitable for importing directly into databases like BigQuery,
+// formatted like 2006-01-02 15:04:05.000000 UTC.
+type DBTime time.Time
+
+func Ptr(t time.Time) *DBTime {
+ return (*DBTime)(&t)
+}
+
+func (dbt *DBTime) MarshalJSON() ([]byte, error) {
+ formattedTime := time.Time(*dbt).Format(`"2006-01-02 15:04:05.000000 UTC"`)
+ return []byte(formattedTime), nil
+}
+
+func (dbt *DBTime) UnmarshalJSON(b []byte) error {
+ timeStr := string(b[1 : len(b)-1])
+ parsedTime, err := time.Parse("2006-01-02 15:04:05.000000 UTC", timeStr)
+ if err != nil {
+ return err
+ }
+ *dbt = (DBTime)(parsedTime)
+ return nil
+}
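+
+// Round-trip sketch (the timestamp is illustrative):
+//
+//	t := time.Date(2024, 1, 2, 15, 4, 5, 0, time.UTC)
+//	b, _ := json.Marshal(dbtime.Ptr(t)) // "2024-01-02 15:04:05.000000 UTC"
+//	var back dbtime.DBTime
+//	_ = json.Unmarshal(b, &back)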
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extension.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extension.go
new file mode 100644
index 000000000..b9fbfb2ec
--- /dev/null
+++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extension.go
@@ -0,0 +1,165 @@
+package extension
+
+import (
+ "fmt"
+ "strings"
+
+ et "github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests"
+ "github.com/openshift-eng/openshift-tests-extension/pkg/util/sets"
+ "github.com/openshift-eng/openshift-tests-extension/pkg/version"
+)
+
+func NewExtension(product, kind, name string) *Extension {
+ return &Extension{
+ APIVersion: CurrentExtensionAPIVersion,
+ Source: Source{
+ Commit: version.CommitFromGit,
+ BuildDate: version.BuildDate,
+ GitTreeState: version.GitTreeState,
+ },
+ Component: Component{
+ Product: product,
+ Kind: kind,
+ Name: name,
+ },
+ }
+}
+
+func (e *Extension) GetSuite(name string) (*Suite, error) {
+ var suite *Suite
+
+ for _, s := range e.Suites {
+ if s.Name == name {
+ suite = &s
+ break
+ }
+ }
+
+ if suite == nil {
+ return nil, fmt.Errorf("no such suite: %s", name)
+ }
+
+ return suite, nil
+}
+
+func (e *Extension) GetSpecs() et.ExtensionTestSpecs {
+ return e.specs
+}
+
+func (e *Extension) AddSpecs(specs et.ExtensionTestSpecs) {
+ specs.Walk(func(spec *et.ExtensionTestSpec) {
+ spec.Source = e.Component.Identifier()
+ })
+
+ e.specs = append(e.specs, specs...)
+}
+
+// IgnoreObsoleteTests marks the given test names as intentionally removed, so they are not reported as missing.
+func (e *Extension) IgnoreObsoleteTests(testNames ...string) {
+ if e.obsoleteTests == nil {
+ e.obsoleteTests = sets.New[string](testNames...)
+ } else {
+ e.obsoleteTests.Insert(testNames...)
+ }
+}
+
+// FindRemovedTestsWithoutRename compares the current set of test specs against oldSpecs, taking original names into
+// account; if any old spec is missing without a recorded rename, the missing names and an error are returned.
+// It can be used to detect test renames or removals.
+func (e *Extension) FindRemovedTestsWithoutRename(oldSpecs et.ExtensionTestSpecs) ([]string, error) {
+ currentSpecs := e.GetSpecs()
+ currentMap := make(map[string]bool)
+
+ // Populate current specs into a map for quick lookup by both Name and OriginalName.
+ for _, spec := range currentSpecs {
+ currentMap[spec.Name] = true
+ if spec.OriginalName != "" {
+ currentMap[spec.OriginalName] = true
+ }
+ }
+
+ var removedTests []string
+
+ // Check oldSpecs against current specs.
+ for _, oldSpec := range oldSpecs {
+ // Skip if the test is marked as obsolete.
+ if e.obsoleteTests.Has(oldSpec.Name) {
+ continue
+ }
+
+ // Check if oldSpec is missing in currentSpecs by both Name and OriginalName.
+ if !currentMap[oldSpec.Name] && (oldSpec.OriginalName == "" || !currentMap[oldSpec.OriginalName]) {
+ removedTests = append(removedTests, oldSpec.Name)
+ }
+ }
+
+ // Return error if any removed tests were found.
+ if len(removedTests) > 0 {
+ return removedTests, fmt.Errorf("tests removed without rename: %v", removedTests)
+ }
+
+ return nil, nil
+}
+
+// AddGlobalSuite adds a suite whose qualifiers will apply to all tests,
+// not just this extension's, allowing a developer to compose a suite of
+// tests from many sources.
+func (e *Extension) AddGlobalSuite(suite Suite) *Extension {
+ if e.Suites == nil {
+ e.Suites = []Suite{suite}
+ } else {
+ e.Suites = append(e.Suites, suite)
+ }
+
+ return e
+}
+
+// AddSuite adds a suite whose qualifiers will only apply to tests present
+// in its own extension.
+func (e *Extension) AddSuite(suite Suite) *Extension {
+ expr := fmt.Sprintf("source == %q", e.Component.Identifier())
+ if len(suite.Qualifiers) == 0 {
+ suite.Qualifiers = []string{expr}
+ } else {
+ for i := range suite.Qualifiers {
+ suite.Qualifiers[i] = fmt.Sprintf("(%s) && (%s)",
+ expr, suite.Qualifiers[i])
+ }
+ }
+
+ e.AddGlobalSuite(suite)
+ return e
+}
+
+func (e *Extension) RegisterImage(image Image) *Extension {
+ e.Images = append(e.Images, image)
+ return e
+}
+
+func (e *Extension) FindSpecsByName(names ...string) (et.ExtensionTestSpecs, error) {
+ var specs et.ExtensionTestSpecs
+ var notFound []string
+
+ for _, name := range names {
+ found := false
+ for i := range e.specs {
+ if e.specs[i].Name == name {
+ specs = append(specs, e.specs[i])
+ found = true
+ break
+ }
+ }
+ if !found {
+ notFound = append(notFound, name)
+ }
+ }
+
+ if len(notFound) > 0 {
+ return nil, fmt.Errorf("no such tests: %s", strings.Join(notFound, ", "))
+ }
+
+ return specs, nil
+}
+
+func (e *Component) Identifier() string {
+ return fmt.Sprintf("%s:%s:%s", e.Product, e.Kind, e.Name)
+}
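
A sketch of how a consuming component might wire these pieces together; the product/kind/name, suite, and test name are illustrative, not taken from this repository:

	package main

	import (
		"fmt"

		"github.com/openshift-eng/openshift-tests-extension/pkg/extension"
		et "github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests"
	)

	func main() {
		ext := extension.NewExtension("openshift", "payload", "my-operator")

		// AddSuite automatically scopes the suite's qualifiers to this component's
		// tests via a `source == "openshift:payload:my-operator"` CEL conjunct.
		ext.AddSuite(extension.Suite{Name: "my-operator/conformance"})

		// Specs normally come from a test-framework binding; one hand-built spec
		// stands in here. AddSpecs stamps each spec's Source field.
		ext.AddSpecs(et.ExtensionTestSpecs{
			{Name: "[sig-example] a test", Lifecycle: et.LifecycleBlocking},
		})

		fmt.Println(ext.GetSpecs().Names()) // [[sig-example] a test]
	}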
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests/environment.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests/environment.go
new file mode 100644
index 000000000..b5116a535
--- /dev/null
+++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests/environment.go
@@ -0,0 +1,92 @@
+package extensiontests
+
+import (
+ "fmt"
+ "strings"
+)
+
+func PlatformEquals(platform string) string {
+ return fmt.Sprintf(`platform=="%s"`, platform)
+}
+
+func NetworkEquals(network string) string {
+ return fmt.Sprintf(`network=="%s"`, network)
+}
+
+func NetworkStackEquals(networkStack string) string {
+ return fmt.Sprintf(`networkStack=="%s"`, networkStack)
+}
+
+func UpgradeEquals(upgrade string) string {
+ return fmt.Sprintf(`upgrade=="%s"`, upgrade)
+}
+
+func TopologyEquals(topology string) string {
+ return fmt.Sprintf(`topology=="%s"`, topology)
+}
+
+func ArchitectureEquals(arch string) string {
+ return fmt.Sprintf(`architecture=="%s"`, arch)
+}
+
+func APIGroupEnabled(apiGroup string) string {
+ return fmt.Sprintf(`apiGroups.exists(api, api=="%s")`, apiGroup)
+}
+
+func APIGroupDisabled(apiGroup string) string {
+ return fmt.Sprintf(`!apiGroups.exists(api, api=="%s")`, apiGroup)
+}
+
+func FeatureGateEnabled(featureGate string) string {
+ return fmt.Sprintf(`featureGates.exists(fg, fg=="%s")`, featureGate)
+}
+
+func FeatureGateDisabled(featureGate string) string {
+ return fmt.Sprintf(`!featureGates.exists(fg, fg=="%s")`, featureGate)
+}
+
+func ExternalConnectivityEquals(externalConnectivity string) string {
+ return fmt.Sprintf(`externalConnectivity=="%s"`, externalConnectivity)
+}
+
+func OptionalCapabilitiesIncludeAny(optionalCapability ...string) string {
+ for i := range optionalCapability {
+ optionalCapability[i] = OptionalCapabilityExists(optionalCapability[i])
+ }
+ return fmt.Sprintf("(%s)", fmt.Sprint(strings.Join(optionalCapability, " || ")))
+}
+
+func OptionalCapabilitiesIncludeAll(optionalCapability ...string) string {
+ for i := range optionalCapability {
+ optionalCapability[i] = OptionalCapabilityExists(optionalCapability[i])
+ }
+ return fmt.Sprintf("(%s)", fmt.Sprint(strings.Join(optionalCapability, " && ")))
+}
+
+func OptionalCapabilityExists(optionalCapability string) string {
+ return fmt.Sprintf(`optionalCapabilities.exists(oc, oc=="%s")`, optionalCapability)
+}
+
+func NoOptionalCapabilitiesExist() string {
+ return "size(optionalCapabilities) == 0"
+}
+
+func InstallerEquals(installer string) string {
+ return fmt.Sprintf(`installer=="%s"`, installer)
+}
+
+func VersionEquals(version string) string {
+ return fmt.Sprintf(`version=="%s"`, version)
+}
+
+func FactEquals(key, value string) string {
+ return fmt.Sprintf(`(fact_keys.exists(k, k=="%s") && facts["%s"].matches("%s"))`, key, key, value)
+}
+
+func Or(cel ...string) string {
+ return fmt.Sprintf("(%s)", strings.Join(cel, " || "))
+}
+
+func And(cel ...string) string {
+ return fmt.Sprintf("(%s)", strings.Join(cel, " && "))
+}
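
These helpers compose into a single qualifier string; a minimal sketch (the platform, stacks, and gate name are illustrative):

	package main

	import (
		"fmt"

		et "github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests"
	)

	func main() {
		expr := et.And(
			et.PlatformEquals("aws"),
			et.Or(et.NetworkStackEquals("ipv4"), et.NetworkStackEquals("dual")),
			et.FeatureGateEnabled("MyGate"),
		)
		fmt.Println(expr)
		// (platform=="aws" && (networkStack=="ipv4" || networkStack=="dual") && featureGates.exists(fg, fg=="MyGate"))
	}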
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests/result.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests/result.go
new file mode 100644
index 000000000..9c03a0a84
--- /dev/null
+++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests/result.go
@@ -0,0 +1,125 @@
+package extensiontests
+
+import (
+ "bytes"
+ _ "embed"
+ "encoding/json"
+ "fmt"
+ "strings"
+ "text/template"
+
+ "github.com/openshift-eng/openshift-tests-extension/pkg/junit"
+)
+
+func (results ExtensionTestResults) Walk(walkFn func(*ExtensionTestResult)) {
+ for i := range results {
+ walkFn(results[i])
+ }
+}
+
+// AddDetails adds additional information to an ExtensionTestResult. Value must marshal to JSON.
+func (result *ExtensionTestResult) AddDetails(name string, value interface{}) {
+ result.Details = append(result.Details, Details{Name: name, Value: value})
+}
+
+func (result ExtensionTestResult) ToJUnit() *junit.TestCase {
+ tc := &junit.TestCase{
+ Name: result.Name,
+ Duration: float64(result.Duration) / 1000.0,
+ }
+ switch result.Result {
+ case ResultFailed:
+ tc.FailureOutput = &junit.FailureOutput{
+ Message: result.Error,
+ Output: result.Error,
+ }
+ case ResultSkipped:
+ messages := []string{}
+ for _, detail := range result.Details {
+ messages = append(messages, fmt.Sprintf("%s: %s", detail.Name, detail.Value))
+ }
+ tc.SkipMessage = &junit.SkipMessage{
+ Message: strings.Join(messages, "\n"),
+ }
+ case ResultPassed:
+ tc.SystemOut = result.Output
+ }
+
+ return tc
+}
+
+func (results ExtensionTestResults) ToJUnit(suiteName string) junit.TestSuite {
+ suite := junit.TestSuite{
+ Name: suiteName,
+ }
+
+ results.Walk(func(result *ExtensionTestResult) {
+ suite.NumTests++
+ switch result.Result {
+ case ResultFailed:
+ suite.NumFailed++
+ case ResultSkipped:
+ suite.NumSkipped++
+ case ResultPassed:
+ // do nothing
+ default:
+ panic(fmt.Sprintf("unknown result type: %s", result.Result))
+ }
+
+ suite.TestCases = append(suite.TestCases, result.ToJUnit())
+ })
+
+ return suite
+}
+
+//go:embed viewer.html
+var viewerHtml []byte
+
+// RenderResultsHTML renders the HTML viewer template with the provided JSON data.
+// The caller is responsible for marshaling their results to JSON. This allows
+// callers with different result struct types to use the same HTML viewer.
+func RenderResultsHTML(jsonData []byte, suiteName string) ([]byte, error) {
+ tmpl, err := template.New("viewer").Parse(string(viewerHtml))
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse template: %w", err)
+ }
+ var out bytes.Buffer
+ if err := tmpl.Execute(&out, struct {
+ Data string
+ SuiteName string
+ }{
+ string(jsonData),
+ suiteName,
+ }); err != nil {
+ return nil, fmt.Errorf("failed to execute template: %w", err)
+ }
+ return out.Bytes(), nil
+}
+
+func (results ExtensionTestResults) ToHTML(suiteName string) ([]byte, error) {
+ encoded, err := json.Marshal(results)
+ if err != nil {
+ return nil, fmt.Errorf("failed to marshal extension test results: %w", err)
+ }
+ // pare down the output if there's a lot; we want this to load in a reasonable amount of time
+ if len(encoded) > 2<<20 {
+ // n.b. this is wasteful, but we must not mutate our inputs, so the encode/decode/encode
+ // pass serves as a deep copy
+ var copiedResults ExtensionTestResults
+ if err := json.Unmarshal(encoded, &copiedResults); err != nil {
+ return nil, fmt.Errorf("failed to unmarshal extension test results: %w", err)
+ }
+ copiedResults.Walk(func(result *ExtensionTestResult) {
+ if result.Result == ResultPassed {
+ result.Error = ""
+ result.Output = ""
+ result.Details = nil
+ }
+ })
+ encoded, err = json.Marshal(copiedResults)
+ if err != nil {
+ return nil, fmt.Errorf("failed to marshal extension test results: %w", err)
+ }
+ }
+ return RenderResultsHTML(encoded, suiteName)
+}
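
A sketch of the skip-details path above: AddDetails entries become the JUnit skip message (names and reason illustrative):

	package main

	import (
		"fmt"

		et "github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests"
	)

	func main() {
		res := &et.ExtensionTestResult{
			Name:   "[sig-example] optional test",
			Result: et.ResultSkipped,
		}
		res.AddDetails("Reason", "platform not supported")

		suite := et.ExtensionTestResults{res}.ToJUnit("my-suite")
		fmt.Println(suite.NumTests, suite.NumSkipped)       // 1 1
		fmt.Println(suite.TestCases[0].SkipMessage.Message) // Reason: platform not supported
	}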
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests/result_writer.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests/result_writer.go
new file mode 100644
index 000000000..f9ca434ca
--- /dev/null
+++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests/result_writer.go
@@ -0,0 +1,213 @@
+package extensiontests
+
+import (
+ "encoding/json"
+ "encoding/xml"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "sync"
+
+ "github.com/openshift-eng/openshift-tests-extension/pkg/junit"
+)
+
+type ResultWriter interface {
+ Write(result *ExtensionTestResult)
+ Flush() error
+}
+
+type NullResultWriter struct{}
+
+func (NullResultWriter) Write(*ExtensionTestResult) {}
+func (NullResultWriter) Flush() error { return nil }
+
+type CompositeResultWriter struct {
+ writers []ResultWriter
+}
+
+func NewCompositeResultWriter(writers ...ResultWriter) *CompositeResultWriter {
+ return &CompositeResultWriter{
+ writers: writers,
+ }
+}
+
+func (w *CompositeResultWriter) AddWriter(writer ResultWriter) {
+ w.writers = append(w.writers, writer)
+}
+
+func (w *CompositeResultWriter) Write(res *ExtensionTestResult) {
+ for _, writer := range w.writers {
+ writer.Write(res)
+ }
+}
+
+func (w *CompositeResultWriter) Flush() error {
+ var errs []error
+ for _, writer := range w.writers {
+ if err := writer.Flush(); err != nil {
+ errs = append(errs, err)
+ }
+ }
+
+ return errors.Join(errs...)
+}
+
+type JUnitResultWriter struct {
+ lock sync.Mutex
+ testSuite *junit.TestSuite
+ out *os.File
+ suiteName string
+ path string
+ results ExtensionTestResults
+}
+
+func NewJUnitResultWriter(path, suiteName string) (ResultWriter, error) {
+ file, err := os.Create(path)
+ if err != nil {
+ return nil, err
+ }
+
+ return &JUnitResultWriter{
+ testSuite: &junit.TestSuite{
+ Name: suiteName,
+ },
+ out: file,
+ suiteName: suiteName,
+ path: path,
+ }, nil
+}
+
+func (w *JUnitResultWriter) Write(res *ExtensionTestResult) {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+ w.results = append(w.results, res)
+}
+
+func (w *JUnitResultWriter) Flush() error {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+ data, err := xml.MarshalIndent(w.results.ToJUnit(w.suiteName), "", " ")
+ if err != nil {
+ return fmt.Errorf("failed to marshal JUnit XML: %w", err)
+ }
+ if _, err := w.out.Write(data); err != nil {
+ return err
+ }
+ if err := w.out.Close(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+type ResultFormat string
+
+var (
+ JSON ResultFormat = "json"
+ JSONL ResultFormat = "jsonl"
+)
+
+type JSONResultWriter struct {
+ lock sync.Mutex
+ out io.Writer
+ format ResultFormat
+ results ExtensionTestResults
+}
+
+func NewJSONResultWriter(out io.Writer, format ResultFormat) (*JSONResultWriter, error) {
+ switch format {
+ case JSON, JSONL:
+ // do nothing
+ default:
+ return nil, fmt.Errorf("unsupported result format: %s", format)
+ }
+
+ return &JSONResultWriter{
+ out: out,
+ format: format,
+ results: ExtensionTestResults{},
+ }, nil
+}
+
+func (w *JSONResultWriter) Write(result *ExtensionTestResult) {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+ switch w.format {
+ case JSONL:
+ // JSONL gets written to out as we get the items
+ data, err := json.Marshal(result)
+ if err != nil {
+ panic(err)
+ }
+ fmt.Fprintf(w.out, "%s\n", string(data))
+ case JSON:
+ w.results = append(w.results, result)
+ }
+}
+
+func (w *JSONResultWriter) Flush() error {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+ switch w.format {
+ case JSONL:
+ // we already wrote it out
+ case JSON:
+ data, err := json.MarshalIndent(w.results, "", " ")
+ if err != nil {
+ return err
+ }
+ _, err = w.out.Write(data)
+ return err
+ }
+
+ return nil
+}
+
+type HTMLResultWriter struct {
+ lock sync.Mutex
+ testSuite *junit.TestSuite
+ out *os.File
+ suiteName string
+ path string
+ results ExtensionTestResults
+}
+
+func NewHTMLResultWriter(path, suiteName string) (ResultWriter, error) {
+ file, err := os.Create(path)
+ if err != nil {
+ return nil, err
+ }
+
+ return &HTMLResultWriter{
+ testSuite: &junit.TestSuite{
+ Name: suiteName,
+ },
+ out: file,
+ suiteName: suiteName,
+ path: path,
+ }, nil
+}
+
+func (w *HTMLResultWriter) Write(res *ExtensionTestResult) {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+ w.results = append(w.results, res)
+}
+
+func (w *HTMLResultWriter) Flush() error {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+ data, err := w.results.ToHTML(w.suiteName)
+ if err != nil {
+ return fmt.Errorf("failed to create result HTML: %w", err)
+ }
+ if _, err := w.out.Write(data); err != nil {
+ return err
+ }
+ if err := w.out.Close(); err != nil {
+ return err
+ }
+
+ return nil
+}
\ No newline at end of file
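
A sketch fanning results out to JSONL on stdout plus a JUnit file (the path, suite name, and test name are illustrative):

	package main

	import (
		"os"

		et "github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests"
	)

	func main() {
		jsonW, err := et.NewJSONResultWriter(os.Stdout, et.JSONL)
		if err != nil {
			panic(err)
		}
		junitW, err := et.NewJUnitResultWriter("/tmp/junit.xml", "my-suite")
		if err != nil {
			panic(err)
		}

		w := et.NewCompositeResultWriter(jsonW, junitW)
		w.Write(&et.ExtensionTestResult{Name: "[sig-example] a test", Result: et.ResultPassed})

		// Flush writes buffered formats and closes file-backed writers;
		// errors from all writers are joined.
		if err := w.Flush(); err != nil {
			panic(err)
		}
	}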
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests/spec.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests/spec.go
new file mode 100644
index 000000000..e87809c8a
--- /dev/null
+++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests/spec.go
@@ -0,0 +1,621 @@
+package extensiontests
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/google/cel-go/cel"
+ "github.com/google/cel-go/checker/decls"
+ "github.com/google/cel-go/common/types"
+
+ "github.com/openshift-eng/openshift-tests-extension/pkg/dbtime"
+ "github.com/openshift-eng/openshift-tests-extension/pkg/flags"
+)
+
+// Walk iterates over all test specs and executes the provided function on each. The test spec can be mutated.
+func (specs ExtensionTestSpecs) Walk(walkFn func(*ExtensionTestSpec)) ExtensionTestSpecs {
+ for i := range specs {
+ walkFn(specs[i])
+ }
+
+ return specs
+}
+
+type SelectFunction func(spec *ExtensionTestSpec) bool
+
+// Select filters the ExtensionTestSpecs to only those that match the provided SelectFunction
+func (specs ExtensionTestSpecs) Select(selectFn SelectFunction) ExtensionTestSpecs {
+ filtered := ExtensionTestSpecs{}
+ for _, spec := range specs {
+ if selectFn(spec) {
+ filtered = append(filtered, spec)
+ }
+ }
+
+ return filtered
+}
+
+// MustSelect filters the ExtensionTestSpecs to only those that match the provided SelectFunction.
+// If no specs are selected, it returns an error.
+func (specs ExtensionTestSpecs) MustSelect(selectFn SelectFunction) (ExtensionTestSpecs, error) {
+ filtered := specs.Select(selectFn)
+ if len(filtered) == 0 {
+ return filtered, fmt.Errorf("no specs selected with specified SelectFunctions")
+ }
+
+ return filtered, nil
+}
+
+// SelectAny filters the ExtensionTestSpecs to only those that match any of the provided SelectFunctions
+func (specs ExtensionTestSpecs) SelectAny(selectFns []SelectFunction) ExtensionTestSpecs {
+ filtered := ExtensionTestSpecs{}
+ for _, spec := range specs {
+ for _, selectFn := range selectFns {
+ if selectFn(spec) {
+ filtered = append(filtered, spec)
+ break
+ }
+ }
+ }
+
+ return filtered
+}
+
+// MustSelectAny filters the ExtensionTestSpecs to only those that match any of the provided SelectFunctions.
+// If no specs are selected, it returns an error.
+func (specs ExtensionTestSpecs) MustSelectAny(selectFns []SelectFunction) (ExtensionTestSpecs, error) {
+ filtered := specs.SelectAny(selectFns)
+ if len(filtered) == 0 {
+ return filtered, fmt.Errorf("no specs selected with specified SelectFunctions")
+ }
+
+ return filtered, nil
+}
+
+// SelectAll filters the ExtensionTestSpecs to only those that match all the provided SelectFunctions
+func (specs ExtensionTestSpecs) SelectAll(selectFns []SelectFunction) ExtensionTestSpecs {
+ filtered := ExtensionTestSpecs{}
+ for _, spec := range specs {
+ anyFalse := false
+ for _, selectFn := range selectFns {
+ if !selectFn(spec) {
+ anyFalse = true
+ break
+ }
+ }
+ if !anyFalse {
+ filtered = append(filtered, spec)
+ }
+ }
+
+ return filtered
+}
+
+// MustSelectAll filters the ExtensionTestSpecs to only those that match all the provided SelectFunctions.
+// If no specs are selected, it returns an error.
+func (specs ExtensionTestSpecs) MustSelectAll(selectFns []SelectFunction) (ExtensionTestSpecs, error) {
+ filtered := specs.SelectAll(selectFns)
+ if len(filtered) == 0 {
+ return filtered, fmt.Errorf("no specs selected with specified SelectFunctions")
+ }
+
+ return filtered, nil
+}
+
+// ModuleTestsOnly ensures that ginkgo tests from vendored sources aren't selected. Unfortunately, making
+// use of kubernetes test helpers results in the entire Ginkgo suite being initialized (ginkgo loves global state),
+// so we need to be careful about which tests we select.
+//
+// A test is excluded if ALL of its code locations with full paths are external (vendored or from external test
+// suites). If at least one code location with a full path is from the local module, the test is included, because
+// local tests may legitimately call helper functions from vendored test frameworks.
+func ModuleTestsOnly() SelectFunction {
+ return func(spec *ExtensionTestSpec) bool {
+ hasLocalCode := false
+
+ for _, cl := range spec.CodeLocations {
+ // Short-form code locations (e.g., "set up framework | framework.go:200") are ignored in this determination.
+ if !strings.Contains(cl, "/") {
+ continue
+ }
+
+ // If this code location is not external (vendored or k8s test), it's local code
+ if !(strings.Contains(cl, "/vendor/") || strings.HasPrefix(cl, "k8s.io/kubernetes")) {
+ hasLocalCode = true
+ break
+ }
+ }
+
+ // Include the test only if it has at least one local code location
+ return hasLocalCode
+ }
+}
+
+// AllTestsIncludingVendored is an alternative to ModuleTestsOnly that explicitly
+// opts in to including vendored tests.
+func AllTestsIncludingVendored() SelectFunction {
+ return func(spec *ExtensionTestSpec) bool {
+ return true
+ }
+}
+
+// NameContains returns a function that selects specs whose name contains the provided string
+func NameContains(name string) SelectFunction {
+ return func(spec *ExtensionTestSpec) bool {
+ return strings.Contains(spec.Name, name)
+ }
+}
+
+// NameContainsAll returns a function that selects specs whose name contains each of the provided contents strings
+func NameContainsAll(contents ...string) SelectFunction {
+ return func(spec *ExtensionTestSpec) bool {
+ for _, content := range contents {
+ if !strings.Contains(spec.Name, content) {
+ return false
+ }
+ }
+ return true
+ }
+}
+
+// HasLabel returns a function that selects specs with the provided label
+func HasLabel(label string) SelectFunction {
+ return func(spec *ExtensionTestSpec) bool {
+ return spec.Labels.Has(label)
+ }
+}
+
+// HasTagWithValue returns a function that selects specs containing a tag with the provided key and value
+func HasTagWithValue(key, value string) SelectFunction {
+ return func(spec *ExtensionTestSpec) bool {
+ return spec.Tags[key] == value
+ }
+}
+
+// WithLifecycle returns a function that selects specs with the provided Lifecycle
+func WithLifecycle(lifecycle Lifecycle) SelectFunction {
+ return func(spec *ExtensionTestSpec) bool {
+ return spec.Lifecycle == lifecycle
+ }
+}
+
+func (specs ExtensionTestSpecs) Names() []string {
+ var names []string
+ for _, spec := range specs {
+ names = append(names, spec.Name)
+ }
+ return names
+}
+
+// Run executes all the specs in parallel, up to maxConcurrent at the same time. Results
+// are written to the given ResultWriter after each spec has completed execution. BeforeEach,
+// BeforeAll, AfterEach, AfterAll hooks are executed when specified. "Each" hooks must be thread
+// safe. Returns an error if any test spec failed, indicating the quantity of failures.
+func (specs ExtensionTestSpecs) Run(ctx context.Context, w ResultWriter, maxConcurrent int) ([]*ExtensionTestResult, error) {
+ queue := make(chan *ExtensionTestSpec)
+ terminalFailures := atomic.Int64{}
+ nonTerminalFailures := atomic.Int64{}
+
+ // Execute beforeAll
+ for _, spec := range specs {
+ for _, beforeAllTask := range spec.beforeAll {
+ beforeAllTask.Run()
+ }
+ }
+
+ // Feed the queue
+ go func() {
+ specs.Walk(func(spec *ExtensionTestSpec) {
+ queue <- spec
+ })
+ close(queue)
+ }()
+
+ // A single spec is handled differently from multiple specs: multiple specs run in parallel
+ // by exec-ing back into the binary with `run-test` and a single test to execute, so to
+ // avoid infinite recursion a single requested test must run in process.
+ runSingleSpec := len(specs) == 1
+
+ // Start consumers
+ var wg sync.WaitGroup
+ resultChan := make(chan *ExtensionTestResult, len(specs))
+ for i := 0; i < maxConcurrent; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for spec := range queue {
+ for _, beforeEachTask := range spec.beforeEach {
+ beforeEachTask.Run(*spec)
+ }
+
+ res := runSpec(ctx, spec, runSingleSpec)
+ if res.Result == ResultFailed {
+ if res.Lifecycle.IsTerminal() {
+ terminalFailures.Add(1)
+ } else {
+ nonTerminalFailures.Add(1)
+ }
+ }
+
+ for _, afterEachTask := range spec.afterEach {
+ afterEachTask.Run(res)
+ }
+
+ // We can't assume the runner will set the name of a test; it may not know it. Even if
+ // it does, we may want to modify it (e.g. k8s-tests for annotations currently).
+ res.Name = spec.Name
+ w.Write(res)
+ resultChan <- res
+ }
+ }()
+ }
+
+ // Wait for all consumers to finish
+ wg.Wait()
+ close(resultChan)
+
+ // Execute afterAll
+ for _, spec := range specs {
+ for _, afterAllTask := range spec.afterAll {
+ afterAllTask.Run()
+ }
+ }
+
+ var results []*ExtensionTestResult
+ for res := range resultChan {
+ results = append(results, res)
+ }
+
+ terminalFailCount := terminalFailures.Load()
+ nonTerminalFailCount := nonTerminalFailures.Load()
+
+ // Non-terminal failures don't cause exit 1, but we still log them
+ if nonTerminalFailCount > 0 {
+ fmt.Fprintf(os.Stderr, "%d informing tests failed (not terminal)\n", nonTerminalFailCount)
+ }
+
+ // Only exit with error if terminal lifecycle tests failed
+ if terminalFailCount > 0 {
+ if nonTerminalFailCount > 0 {
+ return results, fmt.Errorf("%d tests failed (%d informing)", terminalFailCount+nonTerminalFailCount, nonTerminalFailCount)
+ }
+ return results, fmt.Errorf("%d tests failed", terminalFailCount)
+ }
+
+ return results, nil
+}
+
+// AddBeforeAll adds a function to be run once before all tests start executing.
+func (specs ExtensionTestSpecs) AddBeforeAll(fn func()) {
+ task := &OneTimeTask{fn: fn}
+ specs.Walk(func(spec *ExtensionTestSpec) {
+ spec.beforeAll = append(spec.beforeAll, task)
+ })
+}
+
+// AddAfterAll adds a function to be run once after all tests have finished.
+func (specs ExtensionTestSpecs) AddAfterAll(fn func()) {
+ task := &OneTimeTask{fn: fn}
+ specs.Walk(func(spec *ExtensionTestSpec) {
+ spec.afterAll = append(spec.afterAll, task)
+ })
+}
+
+// AddBeforeEach adds a function that runs before each test starts executing. The ExtensionTestSpec is
+// passed in for contextual information, but must not be modified. The provided function must be thread
+// safe.
+func (specs ExtensionTestSpecs) AddBeforeEach(fn func(spec ExtensionTestSpec)) {
+ task := &SpecTask{fn: fn}
+ specs.Walk(func(spec *ExtensionTestSpec) {
+ spec.beforeEach = append(spec.beforeEach, task)
+ })
+}
+
+// AddAfterEach adds a function that runs after each test has finished executing. The ExtensionTestResult
+// can be modified if needed. The provided function must be thread safe.
+func (specs ExtensionTestSpecs) AddAfterEach(fn func(task *ExtensionTestResult)) {
+ task := &TestResultTask{fn: fn}
+ specs.Walk(func(spec *ExtensionTestSpec) {
+ spec.afterEach = append(spec.afterEach, task)
+ })
+}
+
+// MustFilter filters specs using the given celExprs. The expressions are OR'd together; if any
+// of them matches, the spec is included in the filtered set. If a CEL expression is invalid or
+// filtering otherwise fails, this function panics.
+func (specs ExtensionTestSpecs) MustFilter(celExprs []string) ExtensionTestSpecs {
+ specs, err := specs.Filter(celExprs)
+ if err != nil {
+ panic(fmt.Sprintf("filter did not succeed: %s", err.Error()))
+ }
+
+ return specs
+}
+
+// Filter filters specs using the given celExprs. The expressions are OR'd together; if any
+// of them matches, the spec is included in the filtered set.
+func (specs ExtensionTestSpecs) Filter(celExprs []string) (ExtensionTestSpecs, error) {
+ var filteredSpecs ExtensionTestSpecs
+
+ // Empty filters returns all
+ if len(celExprs) == 0 {
+ return specs, nil
+ }
+
+ env, err := cel.NewEnv(
+ cel.Declarations(
+ decls.NewVar("source", decls.String),
+ decls.NewVar("name", decls.String),
+ decls.NewVar("originalName", decls.String),
+ decls.NewVar("labels", decls.NewListType(decls.String)),
+ decls.NewVar("codeLocations", decls.NewListType(decls.String)),
+ decls.NewVar("tags", decls.NewMapType(decls.String, decls.String)),
+ ),
+ )
+ if err != nil {
+ return nil, fmt.Errorf("failed to create CEL environment: %w", err)
+ }
+
+ // OR all expressions together
+ for _, spec := range specs {
+ include := false
+ for _, celExpr := range celExprs {
+ prg, err := programForCEL(env, celExpr)
+ if err != nil {
+ return nil, err
+ }
+ out, _, err := prg.Eval(map[string]interface{}{
+ "name": spec.Name,
+ "source": spec.Source,
+ "originalName": spec.OriginalName,
+ "labels": spec.Labels.UnsortedList(),
+ "codeLocations": spec.CodeLocations,
+ "tags": spec.Tags,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("error evaluating CEL expression: %v", err)
+ }
+
+ // If any CEL expression evaluates to true, include the TestSpec
+ if out == types.True {
+ include = true
+ break
+ }
+ }
+ if include {
+ filteredSpecs = append(filteredSpecs, spec)
+ }
+ }
+
+ return filteredSpecs, nil
+}
+
+func programForCEL(env *cel.Env, celExpr string) (cel.Program, error) {
+ // Parse CEL expression
+ ast, iss := env.Parse(celExpr)
+ if iss.Err() != nil {
+ return nil, fmt.Errorf("error parsing CEL expression '%s': %v", celExpr, iss.Err())
+ }
+
+ // Check the AST
+ checked, iss := env.Check(ast)
+ if iss.Err() != nil {
+ return nil, fmt.Errorf("error checking CEL expression '%s': %v", celExpr, iss.Err())
+ }
+
+ // Create a CEL program from the checked AST
+ prg, err := env.Program(checked)
+ if err != nil {
+ return nil, fmt.Errorf("error creating CEL program: %v", err)
+ }
+ return prg, nil
+}
+
+// FilterByEnvironment checks both the Include and Exclude fields of the ExtensionTestSpec to return those specs which match.
+// Tests will be included by default unless they are explicitly excluded. If Include is specified, only those tests matching
+// the CEL expression will be included.
+//
+// See helper functions in extensiontests/environment.go to craft CEL expressions
+func (specs ExtensionTestSpecs) FilterByEnvironment(envFlags flags.EnvironmentalFlags) (ExtensionTestSpecs, error) {
+ var filteredSpecs ExtensionTestSpecs
+ if envFlags.IsEmpty() {
+ return specs, nil
+ }
+
+ env, err := cel.NewEnv(
+ cel.Declarations(
+ decls.NewVar("apiGroups", decls.NewListType(decls.String)),
+ decls.NewVar("architecture", decls.String),
+ decls.NewVar("externalConnectivity", decls.String),
+ decls.NewVar("fact_keys", decls.NewListType(decls.String)),
+ decls.NewVar("facts", decls.NewMapType(decls.String, decls.String)),
+ decls.NewVar("featureGates", decls.NewListType(decls.String)),
+ decls.NewVar("network", decls.String),
+ decls.NewVar("networkStack", decls.String),
+ decls.NewVar("optionalCapabilities", decls.NewListType(decls.String)),
+ decls.NewVar("platform", decls.String),
+ decls.NewVar("topology", decls.String),
+ decls.NewVar("upgrade", decls.String),
+ decls.NewVar("version", decls.String),
+ ),
+ )
+ if err != nil {
+ return nil, fmt.Errorf("failed to create CEL environment: %w", err)
+ }
+ factKeys := make([]string, 0, len(envFlags.Facts)) // zero length: keys are appended below
+ for k := range envFlags.Facts {
+ factKeys = append(factKeys, k)
+ }
+ vars := map[string]interface{}{
+ "apiGroups": envFlags.APIGroups,
+ "architecture": envFlags.Architecture,
+ "externalConnectivity": envFlags.ExternalConnectivity,
+ "fact_keys": factKeys,
+ "facts": envFlags.Facts,
+ "featureGates": envFlags.FeatureGates,
+ "network": envFlags.Network,
+ "networkStack": envFlags.NetworkStack,
+ "optionalCapabilities": envFlags.OptionalCapabilities,
+ "platform": envFlags.Platform,
+ "topology": envFlags.Topology,
+ "upgrade": envFlags.Upgrade,
+ "version": envFlags.Version,
+ }
+
+ for _, spec := range specs {
+ envSel := spec.EnvironmentSelector
+ // If there is no include or exclude CEL, include it implicitly
+ if envSel.IsEmpty() {
+ filteredSpecs = append(filteredSpecs, spec)
+ continue
+ }
+
+ if envSel.Exclude != "" {
+ prg, err := programForCEL(env, envSel.Exclude)
+ if err != nil {
+ return nil, err
+ }
+ out, _, err := prg.Eval(vars)
+ if err != nil {
+ return nil, fmt.Errorf("error evaluating CEL expression: %v", err)
+ }
+ // If it is explicitly excluded, don't check include
+ if out == types.True {
+ continue
+ }
+ }
+
+ if envSel.Include != "" {
+ prg, err := programForCEL(env, envSel.Include)
+ if err != nil {
+ return nil, err
+ }
+ out, _, err := prg.Eval(vars)
+ if err != nil {
+ return nil, fmt.Errorf("error evaluating CEL expression: %v", err)
+ }
+
+ if out == types.True {
+ filteredSpecs = append(filteredSpecs, spec)
+ }
+ } else { // If it hasn't been excluded, and there is no "include" it will be implicitly included
+ filteredSpecs = append(filteredSpecs, spec)
+ }
+
+ }
+
+ return filteredSpecs, nil
+}
+
+// AddLabel adds the labels to each spec.
+func (specs ExtensionTestSpecs) AddLabel(labels ...string) ExtensionTestSpecs {
+ for i := range specs {
+ specs[i].Labels.Insert(labels...)
+ }
+
+ return specs
+}
+
+// RemoveLabel removes the labels from each spec.
+func (specs ExtensionTestSpecs) RemoveLabel(labels ...string) ExtensionTestSpecs {
+ for i := range specs {
+ specs[i].Labels.Delete(labels...)
+ }
+
+ return specs
+}
+
+// SetTag specifies a key/value pair for each spec.
+func (specs ExtensionTestSpecs) SetTag(key, value string) ExtensionTestSpecs {
+ for i := range specs {
+ specs[i].Tags[key] = value
+ }
+
+ return specs
+}
+
+// UnsetTag removes the specified key from each spec.
+func (specs ExtensionTestSpecs) UnsetTag(key string) ExtensionTestSpecs {
+ for i := range specs {
+ delete(specs[i].Tags, key)
+ }
+
+ return specs
+}
+
+// Include adds the specified CEL expression, used to explicitly include tests by environment, to each spec
+func (specs ExtensionTestSpecs) Include(includeCEL string) ExtensionTestSpecs {
+ for _, spec := range specs {
+ spec.Include(includeCEL)
+ }
+ return specs
+}
+
+// Exclude adds the specified CEL expression, used to explicitly exclude tests by environment, to each spec
+func (specs ExtensionTestSpecs) Exclude(excludeCEL string) ExtensionTestSpecs {
+ for _, spec := range specs {
+ spec.Exclude(excludeCEL)
+ }
+ return specs
+}
+
+// Include adds the specified CEL expression to explicitly include tests by environment.
+// If there is already an "include" defined, it will OR the expressions together
+func (spec *ExtensionTestSpec) Include(includeCEL string) *ExtensionTestSpec {
+ existingInclude := spec.EnvironmentSelector.Include
+ if existingInclude != "" {
+ includeCEL = fmt.Sprintf("(%s) || (%s)", existingInclude, includeCEL)
+ }
+
+ spec.EnvironmentSelector.Include = includeCEL
+ return spec
+}
+
+// Exclude adds the specified CEL expression to explicitly exclude tests by environment.
+// If there is already an "exclude" defined, it will OR the expressions together
+func (spec *ExtensionTestSpec) Exclude(excludeCEL string) *ExtensionTestSpec {
+ existingExclude := spec.EnvironmentSelector.Exclude
+ if existingExclude != "" {
+ excludeCEL = fmt.Sprintf("(%s) || (%s)", existingExclude, excludeCEL)
+ }
+
+ spec.EnvironmentSelector.Exclude = excludeCEL
+ return spec
+}
+
+func runSpec(ctx context.Context, spec *ExtensionTestSpec, runSingleSpec bool) *ExtensionTestResult {
+ startTime := time.Now().UTC()
+ var res *ExtensionTestResult
+ if runSingleSpec || spec.RunParallel == nil {
+ res = spec.Run(ctx)
+ } else {
+ res = spec.RunParallel(ctx)
+ }
+ duration := time.Since(startTime)
+ endTime := startTime.Add(duration).UTC()
+ if res == nil {
+ // this shouldn't happen
+ panic(fmt.Sprintf("test produced no result: %s", spec.Name))
+ }
+
+ res.Lifecycle = spec.Lifecycle
+
+ // If the runner doesn't populate this info, we should set it
+ if res.StartTime == nil {
+ res.StartTime = dbtime.Ptr(startTime)
+ }
+ if res.EndTime == nil {
+ res.EndTime = dbtime.Ptr(endTime)
+ }
+ if res.Duration == 0 {
+ res.Duration = duration.Milliseconds()
+ }
+
+ return res
+}
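
A sketch of the selection and CEL-filter paths above (test names and labels illustrative):

	package main

	import (
		"fmt"

		et "github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests"
		"github.com/openshift-eng/openshift-tests-extension/pkg/util/sets"
	)

	func main() {
		specs := et.ExtensionTestSpecs{
			{Name: "[sig-network] connectivity", Labels: sets.New[string]("Conformance")},
			{Name: "[sig-storage] slow resize", Labels: sets.New[string]("Slow")},
		}

		// Programmatic selection with SelectFunctions.
		fmt.Println(specs.Select(et.HasLabel("Conformance")).Names())
		// [[sig-network] connectivity]

		// The same narrowing as a CEL filter; expressions in the slice are OR'd.
		filtered, err := specs.Filter([]string{`name.contains("[sig-network]")`})
		if err != nil {
			panic(err)
		}
		fmt.Println(filtered.Names())
	}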
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests/task.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests/task.go
new file mode 100644
index 000000000..e808bea87
--- /dev/null
+++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests/task.go
@@ -0,0 +1,31 @@
+package extensiontests
+
+import "sync/atomic"
+
+type SpecTask struct {
+ fn func(spec ExtensionTestSpec)
+}
+
+func (t *SpecTask) Run(spec ExtensionTestSpec) {
+ t.fn(spec)
+}
+
+type TestResultTask struct {
+ fn func(result *ExtensionTestResult)
+}
+
+func (t *TestResultTask) Run(result *ExtensionTestResult) {
+ t.fn(result)
+}
+
+type OneTimeTask struct {
+ fn func()
+ executed int32 // Atomic boolean to indicate whether the function has been run
+}
+
+func (t *OneTimeTask) Run() {
+ // Ensure one-time tasks are only run once
+ if atomic.CompareAndSwapInt32(&t.executed, 0, 1) {
+ t.fn()
+ }
+}
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests/types.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests/types.go
new file mode 100644
index 000000000..cd23be81f
--- /dev/null
+++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests/types.go
@@ -0,0 +1,119 @@
+package extensiontests
+
+import (
+ "context"
+
+ "github.com/openshift-eng/openshift-tests-extension/pkg/dbtime"
+ "github.com/openshift-eng/openshift-tests-extension/pkg/util/sets"
+)
+
+type Lifecycle string
+
+var LifecycleInforming Lifecycle = "informing"
+var LifecycleBlocking Lifecycle = "blocking"
+
+// IsTerminal returns true if failures in tests with this lifecycle should cause
+// the test run to exit with a non-zero exit code.
+func (l Lifecycle) IsTerminal() bool {
+ return l != LifecycleInforming
+}
+
+type ExtensionTestSpecs []*ExtensionTestSpec
+
+type ExtensionTestSpec struct {
+ Name string `json:"name"`
+
+ // OriginalName contains the very first name this test was ever known as, used to preserve
+ // history across all names.
+ OriginalName string `json:"originalName,omitempty"`
+
+ // Labels are single string values to apply to the test spec
+ Labels sets.Set[string] `json:"labels"`
+
+ // Tags are key:value pairs
+ Tags map[string]string `json:"tags,omitempty"`
+
+ // Resources gives optional information about what's required to run this test.
+ Resources Resources `json:"resources"`
+
+ // Source is the origin of the test.
+ Source string `json:"source"`
+
+ // CodeLocations are the files where the spec originates from.
+ CodeLocations []string `json:"codeLocations,omitempty"`
+
+ // Lifecycle informs the executor whether the test is informing only, and should not cause the
+ // overall job run to fail, or if it's blocking where a failure of the test is fatal.
+ // Informing lifecycle tests can be used temporarily to gather information about a test's stability.
+ // Tests must not remain informing forever.
+ Lifecycle Lifecycle `json:"lifecycle"`
+
+ // EnvironmentSelector allows for CEL expressions to be used to control test inclusion
+ EnvironmentSelector EnvironmentSelector `json:"environmentSelector,omitempty"`
+
+ // Run invokes a test in-process. It must not call back into `ote-binary run-test` because that will usually
+ // cause an infinite recursion.
+ Run func(ctx context.Context) *ExtensionTestResult `json:"-"`
+
+ // RunParallel invokes a test in parallel with other tests. This is usually done by exec-ing out
+ // to the `ote-binary run-test "test name"` command and interpreting the result.
+ RunParallel func(ctx context.Context) *ExtensionTestResult `json:"-"`
+
+ // Hook functions
+ afterAll []*OneTimeTask
+ beforeAll []*OneTimeTask
+ afterEach []*TestResultTask
+ beforeEach []*SpecTask
+}
+
+type Resources struct {
+ Isolation Isolation `json:"isolation"`
+ Memory string `json:"memory,omitempty"`
+ Duration string `json:"duration,omitempty"`
+ Timeout string `json:"timeout,omitempty"`
+}
+
+type Isolation struct {
+ Mode string `json:"mode,omitempty"`
+ Conflict []string `json:"conflict,omitempty"`
+ Taint []string `json:"taint,omitempty"`
+ Toleration []string `json:"toleration,omitempty"`
+}
+
+type EnvironmentSelector struct {
+ Include string `json:"include,omitempty"`
+ Exclude string `json:"exclude,omitempty"`
+}
+
+func (e EnvironmentSelector) IsEmpty() bool {
+ return e.Include == "" && e.Exclude == ""
+}
+
+type ExtensionTestResults []*ExtensionTestResult
+
+type Result string
+
+var ResultPassed Result = "passed"
+var ResultSkipped Result = "skipped"
+var ResultFailed Result = "failed"
+
+type ExtensionTestResult struct {
+ Name string `json:"name"`
+ Lifecycle Lifecycle `json:"lifecycle"`
+ Duration int64 `json:"duration"`
+ StartTime *dbtime.DBTime `json:"startTime"`
+ EndTime *dbtime.DBTime `json:"endTime"`
+ Result Result `json:"result"`
+ Output string `json:"output"`
+ Error string `json:"error,omitempty"`
+ Details []Details `json:"details,omitempty"`
+}
+
+// Details are human-readable messages to further explain skips, timeouts, etc.
+// It can also be used to provide contemporaneous information about failures
+// that may not be easily returned by must-gather. For larger artifacts (greater than
+// 10KB), write them to $EXTENSION_ARTIFACTS_DIR.
+type Details struct {
+ Name string `json:"name"`
+ Value interface{} `json:"value"`
+}
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests/viewer.html b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests/viewer.html
new file mode 100644
index 000000000..2ff236aa3
--- /dev/null
+++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests/viewer.html
@@ -0,0 +1,1520 @@
+ [viewer.html: a self-contained, ~1520-line HTML template whose markup did not survive extraction.
+ Recoverable content: page title "Results for {{ .SuiteName }}"; a loader ("Load Test Results",
+ "Drag and drop a JSON test results file here, or click to browse"); summary counters for Total,
+ Passed, Failed, Flaky, and Skipped; duration displays and a test count; pagination ("Page 1 of 1");
+ and empty-state text ("No tests match your filters" / "Try adjusting your search criteria").]
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/registry.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/registry.go
new file mode 100644
index 000000000..bbae421df
--- /dev/null
+++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/registry.go
@@ -0,0 +1,39 @@
+package extension
+
+const DefaultExtension = "default"
+
+type Registry struct {
+ extensions map[string]*Extension
+}
+
+func NewRegistry() *Registry {
+ var r Registry
+ return &r
+}
+
+func (r *Registry) Walk(walkFn func(*Extension)) {
+ for k := range r.extensions {
+ if k == DefaultExtension {
+ continue
+ }
+ walkFn(r.extensions[k])
+ }
+}
+
+func (r *Registry) Get(name string) *Extension {
+ return r.extensions[name]
+}
+
+func (r *Registry) Register(extension *Extension) {
+ if r.extensions == nil {
+ r.extensions = make(map[string]*Extension)
+ // first extension is default
+ r.extensions[DefaultExtension] = extension
+ }
+
+ r.extensions[extension.Component.Identifier()] = extension
+}
+
+func (r *Registry) Deregister(name string) {
+ delete(r.extensions, name)
+}
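
A registry usage sketch; note that the first Register call also becomes the "default" entry (component names illustrative):

	package main

	import (
		"fmt"

		"github.com/openshift-eng/openshift-tests-extension/pkg/extension"
	)

	func main() {
		registry := extension.NewRegistry()
		ext := extension.NewExtension("openshift", "payload", "my-operator")
		registry.Register(ext)

		// The first registered extension is aliased as "default".
		fmt.Println(registry.Get(extension.DefaultExtension) == ext) // true

		// Walk skips the "default" alias, so each extension is visited once.
		registry.Walk(func(e *extension.Extension) {
			fmt.Println(e.Component.Identifier()) // openshift:payload:my-operator
		})
	}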
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/types.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/types.go
new file mode 100644
index 000000000..00d2d9d66
--- /dev/null
+++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/types.go
@@ -0,0 +1,94 @@
+package extension
+
+import (
+ "time"
+
+ "github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests"
+ "github.com/openshift-eng/openshift-tests-extension/pkg/util/sets"
+)
+
+const CurrentExtensionAPIVersion = "v1.1"
+
+// Extension represents an extension to openshift-tests.
+type Extension struct {
+ APIVersion string `json:"apiVersion"`
+ Source Source `json:"source"`
+ Component Component `json:"component"`
+
+ // Suites that the extension wants to advertise/participate in.
+ Suites []Suite `json:"suites"`
+
+ Images []Image `json:"images"`
+
+ // Private data
+ specs extensiontests.ExtensionTestSpecs
+ obsoleteTests sets.Set[string]
+}
+
+// Source contains the details of the commit and source URL.
+type Source struct {
+ // Commit from which this binary was compiled.
+ Commit string `json:"commit"`
+ // BuildDate ISO8601 string of when the binary was built
+ BuildDate string `json:"build_date"`
+ // GitTreeState lets you know the status of the git tree (clean/dirty)
+ GitTreeState string `json:"git_tree_state"`
+ // SourceURL contains the url of the git repository (if known) that this extension was built from.
+ SourceURL string `json:"source_url,omitempty"`
+}
+
+// Component represents the component the binary acts on.
+type Component struct {
+ // The product this component is part of.
+ Product string `json:"product"`
+ // The type of the component.
+ Kind string `json:"type"`
+ // The name of the component.
+ Name string `json:"name"`
+}
+
+type ClusterStability string
+
+var (
+ // ClusterStabilityStable means that at no point during testing do we expect a component to take downtime and upgrades are not happening.
+ ClusterStabilityStable ClusterStability = "Stable"
+
+ // ClusterStabilityDisruptive means that the suite is expected to induce outages to the cluster.
+ ClusterStabilityDisruptive ClusterStability = "Disruptive"
+
+ // ClusterStabilityUpgrade was previously defined, but was removed by @deads2k. Please contact him if you find a use
+ // case for it and it needs to be reintroduced.
+ // ClusterStabilityUpgrade ClusterStability = "Upgrade"
+)
+
+// Suite represents additional suites the extension wants to advertise. When executed in the context
+// of a parent, a child suite's count, parallelism, stability, and timeout options are superseded by the parent's.
+type Suite struct {
+ Name string `json:"name"`
+ Description string `json:"description"`
+
+ // Parents are the parent suites this suite is part of.
+ Parents []string `json:"parents,omitempty"`
+ // Qualifiers are CEL expressions that are OR'd together for test selection that are members of the suite.
+ Qualifiers []string `json:"qualifiers,omitempty"`
+
+ // Count is the default number of times to execute each test in this suite.
+ Count int `json:"count,omitempty"`
+ // Parallelism is the maximum parallelism of this suite.
+ Parallelism int `json:"parallelism,omitempty"`
+ // ClusterStability informs openshift-tests whether this entire test suite is expected to be disruptive or not
+ // to normal cluster operations.
+ ClusterStability ClusterStability `json:"clusterStability,omitempty"`
+ // TestTimeout is the default timeout for tests in this suite.
+ TestTimeout *time.Duration `json:"testTimeout,omitempty"`
+}
+
+type Image struct {
+ Index int `json:"index"`
+ Registry string `json:"registry"`
+ Name string `json:"name"`
+ Version string `json:"version"`
+ // Mapped is the image reference that this image is mirrored to by the image mirror tool.
+ // This field should be populated if the mirrored image reference is predetermined by the test extensions.
+ Mapped *Image `json:"mapped,omitempty"`
+}
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/flags/component.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/flags/component.go
new file mode 100644
index 000000000..ca9e425c4
--- /dev/null
+++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/flags/component.go
@@ -0,0 +1,25 @@
+package flags
+
+import (
+ "github.com/spf13/pflag"
+)
+
+const DefaultExtension = "default"
+
+// ComponentFlags contains information for specifying the component.
+type ComponentFlags struct {
+ Component string
+}
+
+func NewComponentFlags() *ComponentFlags {
+ return &ComponentFlags{
+ Component: DefaultExtension,
+ }
+}
+
+func (f *ComponentFlags) BindFlags(fs *pflag.FlagSet) {
+ fs.StringVar(&f.Component,
+ "component",
+ f.Component,
+ "specify the component to enable")
+}
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/flags/concurrency.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/flags/concurrency.go
new file mode 100644
index 000000000..2db07c765
--- /dev/null
+++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/flags/concurrency.go
@@ -0,0 +1,23 @@
+package flags
+
+import "github.com/spf13/pflag"
+
+// ConcurrencyFlags contains information for configuring concurrency
+type ConcurrencyFlags struct {
+ MaxConcurency int
+}
+
+func NewConcurrencyFlags() *ConcurrencyFlags {
+ return &ConcurrencyFlags{
+ MaxConcurency: 10,
+ }
+}
+
+func (f *ConcurrencyFlags) BindFlags(fs *pflag.FlagSet) {
+ fs.IntVarP(&f.MaxConcurency,
+ "max-concurrency",
+ "c",
+ f.MaxConcurency,
+ "maximum number of tests to run in parallel",
+ )
+}
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/flags/environment.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/flags/environment.go
new file mode 100644
index 000000000..af7a0258e
--- /dev/null
+++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/flags/environment.go
@@ -0,0 +1,114 @@
+package flags
+
+import (
+ "reflect"
+
+ "github.com/spf13/pflag"
+)
+
+type EnvironmentalFlags struct {
+ APIGroups []string
+ Architecture string
+ ExternalConnectivity string
+ Facts map[string]string
+ FeatureGates []string
+ Network string
+ NetworkStack string
+ OptionalCapabilities []string
+ Platform string
+ Topology string
+ Upgrade string
+ Version string
+}
+
+func NewEnvironmentalFlags() *EnvironmentalFlags {
+ return &EnvironmentalFlags{}
+}
+
+func (f *EnvironmentalFlags) BindFlags(fs *pflag.FlagSet) {
+ fs.StringArrayVar(&f.APIGroups,
+ "api-group",
+ f.APIGroups,
+ "The API groups supported by this cluster. Since: v1.1")
+ fs.StringVar(&f.Architecture,
+ "architecture",
+ "",
+ "The CPU architecture of the target cluster (\"amd64\", \"arm64\"). Since: v1.0")
+ fs.StringVar(&f.ExternalConnectivity,
+ "external-connectivity",
+ "",
+ "The External Connectivity of the target cluster (\"Disconnected\", \"Direct\", \"Proxied\"). Since: v1.0")
+ fs.StringArrayVar(&f.FeatureGates,
+ "feature-gate",
+ f.FeatureGates,
+ "The feature gates enabled on this cluster. Since: v1.1")
+ fs.StringToStringVar(&f.Facts,
+ "fact",
+ make(map[string]string),
+ "Facts advertised by cluster components. Since: v1.0")
+ fs.StringVar(&f.Network,
+ "network",
+ "",
+ "The network of the target cluster (\"ovn\", \"sdn\"). Since: v1.0")
+ fs.StringVar(&f.NetworkStack,
+ "network-stack",
+ "",
+ "The network stack of the target cluster (\"ipv6\", \"ipv4\", \"dual\"). Since: v1.0")
+ fs.StringSliceVar(&f.OptionalCapabilities,
+ "optional-capability",
+ []string{},
+ "An Optional Capability of the target cluster. Can be passed multiple times. Since: v1.0")
+ fs.StringVar(&f.Platform,
+ "platform",
+ "",
+ "The hardware or cloud platform (\"aws\", \"gcp\", \"metal\", ...). Since: v1.0")
+ fs.StringVar(&f.Topology,
+ "topology",
+ "",
+ "The target cluster topology (\"ha\", \"microshift\", ...). Since: v1.0")
+ fs.StringVar(&f.Upgrade,
+ "upgrade",
+ "",
+ "The upgrade that was performed prior to the test run (\"micro\", \"minor\"). Since: v1.0")
+ fs.StringVar(&f.Version,
+ "version",
+ "",
+ "\"major.minor\" version of target cluster. Since: v1.0")
+}
+
+func (f *EnvironmentalFlags) IsEmpty() bool {
+ v := reflect.ValueOf(*f)
+
+ for i := 0; i < v.NumField(); i++ {
+ field := v.Field(i)
+
+ switch field.Kind() {
+ case reflect.Slice, reflect.Map:
+ if !field.IsNil() && field.Len() > 0 {
+ return false
+ }
+ default:
+ if !reflect.DeepEqual(field.Interface(), reflect.Zero(field.Type()).Interface()) {
+ return false
+ }
+ }
+ }
+
+ return true
+}
+
+// EnvironmentFlagVersions holds the "Since" version metadata for each flag.
+var EnvironmentFlagVersions = map[string]string{
+ "api-group": "v1.1",
+ "architecture": "v1.0",
+ "external-connectivity": "v1.0",
+ "fact": "v1.0",
+ "feature-gate": "v1.1",
+ "network": "v1.0",
+ "network-stack": "v1.0",
+ "optional-capability": "v1.0",
+ "platform": "v1.0",
+ "topology": "v1.0",
+ "upgrade": "v1.0",
+ "version": "v1.0",
+}
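
A sketch of binding and parsing these flags outside a cobra command, then gating specs (flag values illustrative):

	package main

	import (
		"fmt"

		"github.com/openshift-eng/openshift-tests-extension/pkg/flags"
		"github.com/spf13/pflag"
	)

	func main() {
		env := flags.NewEnvironmentalFlags()
		fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
		env.BindFlags(fs)

		if err := fs.Parse([]string{"--platform=aws", "--optional-capability=Build"}); err != nil {
			panic(err)
		}
		fmt.Println(env.Platform, env.OptionalCapabilities, env.IsEmpty()) // aws [Build] false

		// With a populated spec list, the same struct drives filtering:
		//   filtered, err := specs.FilterByEnvironment(*env)
	}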
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/flags/names.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/flags/names.go
new file mode 100644
index 000000000..9e5864839
--- /dev/null
+++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/flags/names.go
@@ -0,0 +1,24 @@
+package flags
+
+import (
+ "github.com/spf13/pflag"
+)
+
+// NamesFlags contains information for specifying multiple test names.
+type NamesFlags struct {
+ Names []string
+}
+
+func NewNamesFlags() *NamesFlags {
+ return &NamesFlags{
+ Names: []string{},
+ }
+}
+
+func (f *NamesFlags) BindFlags(fs *pflag.FlagSet) {
+ fs.StringArrayVarP(&f.Names,
+ "names",
+ "n",
+ f.Names,
+ "specify test name (can be specified multiple times)")
+}
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/flags/output.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/flags/output.go
new file mode 100644
index 000000000..24f49f638
--- /dev/null
+++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/flags/output.go
@@ -0,0 +1,95 @@
+package flags
+
+import (
+ "encoding/json"
+ "reflect"
+ "strings"
+
+ "github.com/pkg/errors"
+ "github.com/spf13/pflag"
+)
+
+// OutputFlags contains information for specifying the output format.
+type OutputFlags struct {
+ Output string
+}
+
+func NewOutputFlags() *OutputFlags {
+ return &OutputFlags{
+ Output: "json",
+ }
+}
+
+func (f *OutputFlags) BindFlags(fs *pflag.FlagSet) {
+ fs.StringVarP(&f.Output,
+ "output",
+ "o",
+ f.Output,
+ "output mode")
+}
+
+func (o *OutputFlags) Marshal(v interface{}) ([]byte, error) {
+ switch o.Output {
+ case "", "json":
+ j, err := json.MarshalIndent(&v, "", " ")
+ if err != nil {
+ return nil, err
+ }
+ return j, nil
+ case "jsonl":
+ // Check if v is a slice or array
+ val := reflect.ValueOf(v)
+ if val.Kind() == reflect.Slice || val.Kind() == reflect.Array {
+ var result []byte
+ for i := 0; i < val.Len(); i++ {
+ item := val.Index(i).Interface()
+ j, err := json.Marshal(item)
+ if err != nil {
+ return nil, err
+ }
+ result = append(result, j...)
+ result = append(result, '\n') // Append newline after each item
+ }
+ return result, nil
+ }
+ return nil, errors.New("jsonl format requires a slice or array")
+ case "names":
+ val := reflect.ValueOf(v)
+ if val.Kind() == reflect.Slice || val.Kind() == reflect.Array {
+ var names []string
+ outerLoop:
+ for i := 0; i < val.Len(); i++ {
+ item := val.Index(i)
+ // Check for Name() or Identifier() methods
+ itemInterface := item.Interface()
+ nameFuncs := []string{"Name", "Identifier"}
+ for _, fn := range nameFuncs {
+ method := reflect.ValueOf(itemInterface).MethodByName(fn)
+ if method.IsValid() && method.Kind() == reflect.Func && method.Type().NumIn() == 0 && method.Type().NumOut() == 1 && method.Type().Out(0).Kind() == reflect.String {
+ name := method.Call(nil)[0].String()
+ names = append(names, name)
+ continue outerLoop
+ }
+ }
+
+ // Dereference pointer if needed
+ if item.Kind() == reflect.Ptr {
+ item = item.Elem()
+ }
+ // Check for struct with Name field
+ if item.Kind() == reflect.Struct {
+ nameField := item.FieldByName("Name")
+ if nameField.IsValid() && nameField.Kind() == reflect.String {
+ names = append(names, nameField.String())
+ }
+ } else {
+ return nil, errors.New("items must have a Name field or a Name() method")
+ }
+ }
+ return []byte(strings.Join(names, "\n")), nil
+ }
+ return nil, errors.New("names format requires an array of structs")
+ default:
+ return nil, errors.Errorf("invalid output format: %s", o.Output)
+ }
+}
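
A sketch of the three output modes (the Item type is a stand-in for any slice element with a Name field):

	package main

	import (
		"fmt"

		"github.com/openshift-eng/openshift-tests-extension/pkg/flags"
	)

	type Item struct {
		Name string `json:"name"`
	}

	func main() {
		items := []Item{{Name: "a"}, {Name: "b"}}

		for _, mode := range []string{"json", "jsonl", "names"} {
			o := flags.OutputFlags{Output: mode}
			out, err := o.Marshal(items)
			if err != nil {
				panic(err)
			}
			fmt.Printf("--- %s ---\n%s\n", mode, out)
		}
	}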
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/flags/suite.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/flags/suite.go
new file mode 100644
index 000000000..23de832a8
--- /dev/null
+++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/flags/suite.go
@@ -0,0 +1,21 @@
+package flags
+
+import (
+ "github.com/spf13/pflag"
+)
+
+// SuiteFlags contains information for specifying the suite.
+type SuiteFlags struct {
+ Suite string
+}
+
+func NewSuiteFlags() *SuiteFlags {
+ return &SuiteFlags{}
+}
+
+func (f *SuiteFlags) BindFlags(fs *pflag.FlagSet) {
+ fs.StringVar(&f.Suite,
+ "suite",
+ f.Suite,
+ "specify the suite to use")
+}
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/ginkgo/logging.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/ginkgo/logging.go
new file mode 100644
index 000000000..0b84ca41c
--- /dev/null
+++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/ginkgo/logging.go
@@ -0,0 +1,21 @@
+package ginkgo
+
+import (
+ "github.com/go-logr/logr"
+ "github.com/go-logr/logr/funcr"
+ "github.com/onsi/ginkgo/v2"
+)
+
+// This is copied from ginkgo because ginkgo made it internal and then hardcoded an init block
+// that wires these functions to os.Stdout; we want to wire to stderr (or a different buffer)
+// so we can emit JSON output.
+
+func GinkgoLogrFunc(writer ginkgo.GinkgoWriterInterface) logr.Logger {
+ return funcr.New(func(prefix, args string) {
+ if prefix == "" {
+ writer.Printf("%s\n", args)
+ } else {
+ writer.Printf("%s %s\n", prefix, args)
+ }
+ }, funcr.Options{})
+}
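
A sketch of wiring this into ginkgo v2; the import alias avoids the package-name clash:

	package main

	import (
		g "github.com/onsi/ginkgo/v2"

		oteginkgo "github.com/openshift-eng/openshift-tests-extension/pkg/ginkgo"
	)

	func main() {
		// GinkgoWriter buffers output per spec, so log lines land in the right test.
		logger := oteginkgo.GinkgoLogrFunc(g.GinkgoWriter)
		logger.Info("probe complete", "attempts", 3)
	}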
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/ginkgo/parallel.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/ginkgo/parallel.go
new file mode 100644
index 000000000..890cebb09
--- /dev/null
+++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/ginkgo/parallel.go
@@ -0,0 +1,139 @@
+package ginkgo
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "os"
+ "os/exec"
+ "syscall"
+ "time"
+
+ "github.com/openshift-eng/openshift-tests-extension/pkg/dbtime"
+ "github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests"
+)
+
+func SpawnProcessToRunTest(ctx context.Context, testName string, timeout time.Duration) *extensiontests.ExtensionTestResult {
+ // longerCtx backstops the process, but leaves termination up to us when possible so that a double interrupt works
+ longerCtx, longerCancel := context.WithTimeout(ctx, timeout+15*time.Minute)
+ defer longerCancel()
+ timeoutCtx, shorterCancel := context.WithTimeout(longerCtx, timeout)
+ defer shorterCancel()
+
+ stdout := &bytes.Buffer{}
+ stderr := &bytes.Buffer{}
+
+ command := exec.CommandContext(longerCtx, os.Args[0], "run-test", "--output=json", testName)
+ command.Stdout = stdout
+ command.Stderr = stderr
+
+ start := time.Now()
+ err := command.Start()
+ if err != nil {
+ fmt.Fprintf(stderr, "Command Start Error: %v\n", err)
+ return newTestResult(testName, extensiontests.ResultFailed, start, time.Now(), stdout, stderr)
+ }
+
+ go func() {
+ select {
+ // interrupt tests after timeout, and abort if they don't complete quickly enough
+ case <-time.After(timeout):
+ if command.Process != nil {
+ // we're not going to do anything with the err
+ _ = command.Process.Signal(syscall.SIGINT)
+ }
+ // if the process appears to be hung a significant amount of time after the timeout,
+ // send an ABRT so we get a stack dump
+ select {
+ case <-time.After(time.Minute):
+ if command.Process != nil {
+ // we're not going to do anything with the err
+ _ = command.Process.Signal(syscall.SIGABRT)
+ }
+ case <-timeoutCtx.Done():
+ if command.Process != nil {
+ _ = command.Process.Signal(syscall.SIGABRT)
+ }
+ }
+ case <-timeoutCtx.Done():
+ if command.Process != nil {
+ _ = command.Process.Signal(syscall.SIGINT)
+ }
+ }
+ }()
+
+ result := extensiontests.ResultFailed
+ cmdErr := command.Wait()
+
+ subcommandResult, parseErr := newTestResultFromOutput(stdout)
+ if parseErr == nil {
+ // even if we have a cmdErr, if we were able to parse the result, trust the output
+ return subcommandResult
+ }
+
+ fmt.Fprintf(stderr, "Command Error: %v\n", cmdErr)
+ fmt.Fprintf(stderr, "Deserializaion Error: %v\n", parseErr)
+ return newTestResult(testName, result, start, time.Now(), stdout, stderr)
+}
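+
+// Caller sketch: RunParallel in this package's util.go invokes this as
+//
+//	result := SpawnProcessToRunTest(ctx, name, 90*time.Minute)
+//
+// relying on the re-exec of os.Args[0] with "run-test --output=json" for
+// process-level isolation between parallel tests.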
+
+func newTestResultFromOutput(stdout *bytes.Buffer) (*extensiontests.ExtensionTestResult, error) {
+ if len(stdout.Bytes()) == 0 {
+ return nil, errors.New("no output from command")
+ }
+
+ // when the command runs correctly, we get json or json slice output
+ retArray := []extensiontests.ExtensionTestResult{}
+ if arrayItemErr := json.Unmarshal(stdout.Bytes(), &retArray); arrayItemErr == nil {
+ if len(retArray) != 1 {
+ return nil, errors.New("expected 1 result, got %v results")
+ }
+ return &retArray[0], nil
+ }
+
+ // otherwise, fall back to a single json object
+ ret := &extensiontests.ExtensionTestResult{}
+ if singleItemErr := json.Unmarshal(stdout.Bytes(), ret); singleItemErr != nil {
+ return nil, singleItemErr
+ }
+
+ return ret, nil
+}
+
+func newTestResult(name string, result extensiontests.Result, start, end time.Time, stdout, stderr *bytes.Buffer) *extensiontests.ExtensionTestResult {
+ duration := end.Sub(start)
+ dbStart := dbtime.DBTime(start)
+ dbEnd := dbtime.DBTime(end)
+ ret := &extensiontests.ExtensionTestResult{
+ Name: name,
+ Lifecycle: "", // lifecycle is populated one level above this.
+ Duration: int64(duration),
+ StartTime: &dbStart,
+ EndTime: &dbEnd,
+ Result: result,
+ Details: nil,
+ }
+
+ if stdout != nil && stderr != nil {
+ stdoutStr := stdout.String()
+ stderrStr := stderr.String()
+
+ ret.Output = fmt.Sprintf("STDOUT:\n%s\n\nSTDERR:\n%s\n", stdoutStr, stderrStr)
+
+ // try to choose the best summary
+ switch {
+ case len(stderrStr) > 0 && len(stderrStr) < 5000:
+ ret.Error = stderrStr
+ case len(stderrStr) > 0 && len(stderrStr) >= 5000:
+ ret.Error = stderrStr[len(stderrStr)-5000:]
+
+ case len(stdoutStr) > 0 && len(stdoutStr) < 5000:
+ ret.Error = stdoutStr
+ case len(stdoutStr) > 0 && len(stdoutStr) >= 5000:
+ ret.Error = stdoutStr[len(stdoutStr)-5000:]
+ }
+ }
+
+ return ret
+}
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/ginkgo/util.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/ginkgo/util.go
new file mode 100644
index 000000000..e970d46ad
--- /dev/null
+++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/ginkgo/util.go
@@ -0,0 +1,229 @@
+package ginkgo
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/onsi/ginkgo/v2"
+ "github.com/onsi/ginkgo/v2/types"
+ "github.com/onsi/gomega"
+ "github.com/pkg/errors"
+
+ "github.com/openshift-eng/openshift-tests-extension/pkg/util/sets"
+
+ ext "github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests"
+)
+
+func configureGinkgo() (*types.SuiteConfig, *types.ReporterConfig, error) {
+ if !ginkgo.GetSuite().InPhaseBuildTree() {
+ if err := ginkgo.GetSuite().BuildTree(); err != nil {
+ return nil, nil, errors.Wrapf(err, "couldn't build ginkgo tree")
+ }
+ }
+
+ // Ginkgo initialization
+ ginkgo.GetSuite().ClearBeforeAndAfterSuiteNodes()
+ suiteConfig, reporterConfig := ginkgo.GinkgoConfiguration()
+ suiteConfig.RandomizeAllSpecs = true
+ suiteConfig.Timeout = 24 * time.Hour
+ reporterConfig.NoColor = true
+ reporterConfig.Verbose = true
+ ginkgo.SetReporterConfig(reporterConfig)
+
+ // Write output to Stderr
+ ginkgo.GinkgoWriter = ginkgo.NewWriter(os.Stderr)
+ ginkgo.GinkgoLogr = GinkgoLogrFunc(ginkgo.GinkgoWriter)
+
+ gomega.RegisterFailHandler(ginkgo.Fail)
+
+ return &suiteConfig, &reporterConfig, nil
+}
+
+// BuildExtensionTestSpecsFromOpenShiftGinkgoSuite generates OTE specs for Ginkgo tests. While OTE isn't limited to
+// running Ginkgo tests (anything that implements the ExtensionTestSpec interface can be used), this is the most
+// common course of action. The typical use case is to omit selectFns; if provided, they filter the returned list
+// of specs and are applied in the order provided.
+func BuildExtensionTestSpecsFromOpenShiftGinkgoSuite(selectFns ...ext.SelectFunction) (ext.ExtensionTestSpecs, error) {
+ var specs ext.ExtensionTestSpecs
+ var enforceSerialExecutionForGinkgo sync.Mutex // in-process parallelization for ginkgo is impossible so far
+
+ if _, _, err := configureGinkgo(); err != nil {
+ return nil, err
+ }
+
+ cwd, err := os.Getwd()
+ if err != nil {
+ return nil, errors.Wrap(err, "couldn't get current working directory")
+ }
+
+ ginkgo.GetSuite().WalkTests(func(name string, spec types.TestSpec) {
+ var codeLocations []string
+ for _, cl := range spec.CodeLocations() {
+ codeLocations = append(codeLocations, cl.String())
+ }
+
+ testCase := &ext.ExtensionTestSpec{
+ Name: spec.Text(),
+ Labels: sets.New[string](spec.Labels()...),
+ CodeLocations: codeLocations,
+ Lifecycle: GetLifecycle(spec.Labels()),
+ Run: func(ctx context.Context) *ext.ExtensionTestResult {
+ enforceSerialExecutionForGinkgo.Lock()
+ defer enforceSerialExecutionForGinkgo.Unlock()
+
+ suiteConfig, reporterConfig, _ := configureGinkgo()
+
+ result := &ext.ExtensionTestResult{
+ Name: spec.Text(),
+ }
+
+ var summary types.SpecReport
+ ginkgo.GetSuite().RunSpec(spec, ginkgo.Labels{}, "", cwd, ginkgo.GetFailer(), ginkgo.GetWriter(), *suiteConfig,
+ *reporterConfig)
+ for _, report := range ginkgo.GetSuite().GetReport().SpecReports {
+ if report.NumAttempts > 0 {
+ summary = report
+ }
+ }
+
+ result.Output = summary.CapturedGinkgoWriterOutput
+ result.Error = summary.CapturedStdOutErr
+
+ switch {
+ case summary.State == types.SpecStatePassed:
+ result.Result = ext.ResultPassed
+ case summary.State == types.SpecStateSkipped, summary.State == types.SpecStatePending:
+ result.Result = ext.ResultSkipped
+ if len(summary.Failure.Message) > 0 {
+ result.Output = fmt.Sprintf(
+ "%s\n skip [%s:%d]: %s\n",
+ result.Output,
+ lastFilenameSegment(summary.Failure.Location.FileName),
+ summary.Failure.Location.LineNumber,
+ summary.Failure.Message,
+ )
+ } else if len(summary.Failure.ForwardedPanic) > 0 {
+ result.Output = fmt.Sprintf(
+ "%s\n skip [%s:%d]: %s\n",
+ result.Output,
+ lastFilenameSegment(summary.Failure.Location.FileName),
+ summary.Failure.Location.LineNumber,
+ summary.Failure.ForwardedPanic,
+ )
+ }
+ case summary.State == types.SpecStateFailed, summary.State == types.SpecStatePanicked, summary.State == types.SpecStateInterrupted, summary.State == types.SpecStateAborted:
+ result.Result = ext.ResultFailed
+ var errors []string
+ if len(summary.Failure.ForwardedPanic) > 0 {
+ if len(summary.Failure.Location.FullStackTrace) > 0 {
+ errors = append(errors, fmt.Sprintf("\n%s\n", summary.Failure.Location.FullStackTrace))
+ }
+ errors = append(errors, fmt.Sprintf("fail [%s:%d]: Test Panicked: %s", lastFilenameSegment(summary.Failure.Location.FileName), summary.Failure.Location.LineNumber, summary.Failure.ForwardedPanic))
+ }
+ errors = append(errors, fmt.Sprintf("fail [%s:%d]: %s", lastFilenameSegment(summary.Failure.Location.FileName), summary.Failure.Location.LineNumber, summary.Failure.Message))
+ result.Error = strings.Join(errors, "\n")
+ case summary.State == types.SpecStateTimedout:
+ result.Result = ext.ResultFailed
+ var errors []string
+ for _, additionalFailure := range summary.AdditionalFailures {
+ collectAdditionalFailures(&errors, " ", additionalFailure.Failure)
+ }
+ if summary.Failure.AdditionalFailure != nil {
+ collectAdditionalFailures(&errors, " ", summary.Failure.AdditionalFailure.Failure)
+ }
+ errors = append(errors, fmt.Sprintf("fail [%s:%d]: %s", lastFilenameSegment(summary.Failure.Location.FileName), summary.Failure.Location.LineNumber, summary.Failure.Message))
+ result.Error = strings.Join(errors, "\n")
+ default:
+ panic(fmt.Sprintf("test produced unknown outcome: %#v", summary))
+ }
+
+ return result
+ },
+ RunParallel: func(ctx context.Context) *ext.ExtensionTestResult {
+ // TODO pass through timeout and determine Lifecycle
+ return SpawnProcessToRunTest(ctx, name, 90*time.Minute)
+ },
+ }
+ specs = append(specs, testCase)
+ })
+
+ // The default select function excludes vendored specs. When relying on the Kubernetes test framework for its
+ // helpers, it unfortunately also ends up importing *all* Ginkgo specs. This is unsafe: it could potentially
+ // override the kube specs already present in origin. The best course of action is to enforce this behavior for
+ // everyone. If, for some reason, you must include vendored specs, you can opt in directly by supplying your own
+ // SelectFunctions or using AllTestsIncludedVendored().
+ if len(selectFns) == 0 {
+ selectFns = []ext.SelectFunction{ext.ModuleTestsOnly()}
+ }
+
+ for _, selectFn := range selectFns {
+ specs = specs.Select(selectFn)
+ }
+
+ return specs, nil
+}
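+
+// Usage sketch (a hypothetical extension binary): rely on the default exclusion of
+// vendored specs, or make it explicit:
+//
+//	specs, err := BuildExtensionTestSpecsFromOpenShiftGinkgoSuite()
+//	// equivalent to:
+//	specs, err = BuildExtensionTestSpecsFromOpenShiftGinkgoSuite(ext.ModuleTestsOnly())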
+
+func Informing() ginkgo.Labels {
+ return ginkgo.Label(fmt.Sprintf("Lifecycle:%s", ext.LifecycleInforming))
+}
+
+func Slow() ginkgo.Labels {
+ return ginkgo.Label("SLOW")
+}
+
+func Blocking() ginkgo.Labels {
+ return ginkgo.Label(fmt.Sprintf("Lifecycle:%s", ext.LifecycleBlocking))
+}
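+
+// These label helpers are meant to decorate specs; a hedged sketch:
+//
+//	var _ = ginkgo.It("reports connectivity", Informing(), func() {
+//		// test body
+//	})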
+
+func GetLifecycle(labels ginkgo.Labels) ext.Lifecycle {
+ for _, label := range labels {
+ res := strings.Split(label, ":")
+ if len(res) != 2 || !strings.EqualFold(res[0], "lifecycle") {
+ continue
+ }
+ return MustLifecycle(res[1]) // this panics if unsupported lifecycle is used
+ }
+
+ return ext.LifecycleBlocking
+}
+
+func MustLifecycle(l string) ext.Lifecycle {
+ switch ext.Lifecycle(l) {
+ case ext.LifecycleInforming, ext.LifecycleBlocking:
+ return ext.Lifecycle(l)
+ default:
+ panic(fmt.Sprintf("unknown test lifecycle: %s", l))
+ }
+}
+
+func lastFilenameSegment(filename string) string {
+ if parts := strings.Split(filename, "/vendor/"); len(parts) > 1 {
+ return parts[len(parts)-1]
+ }
+ if parts := strings.Split(filename, "/src/"); len(parts) > 1 {
+ return parts[len(parts)-1]
+ }
+ return filename
+}
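+
+// (For example, "/go/src/github.com/foo/vendor/k8s.io/x/y.go" trims to "k8s.io/x/y.go".)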
+
+func collectAdditionalFailures(errors *[]string, suffix string, failure types.Failure) {
+ if failure.IsZero() {
+ return
+ }
+
+ if len(failure.ForwardedPanic) > 0 {
+ if len(failure.Location.FullStackTrace) > 0 {
+ *errors = append(*errors, fmt.Sprintf("\n%s\n", failure.Location.FullStackTrace))
+ }
+ *errors = append(*errors, fmt.Sprintf("fail [%s:%d]: Test Panicked: %s%s", lastFilenameSegment(failure.Location.FileName), failure.Location.LineNumber, failure.ForwardedPanic, suffix))
+ }
+ *errors = append(*errors, fmt.Sprintf("fail [%s:%d] %s%s", lastFilenameSegment(failure.Location.FileName), failure.Location.LineNumber, failure.Message, suffix))
+
+ if failure.AdditionalFailure != nil {
+ collectAdditionalFailures(errors, " ", failure.AdditionalFailure.Failure)
+ }
+}
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/junit/types.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/junit/types.go
new file mode 100644
index 000000000..0309fbd51
--- /dev/null
+++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/junit/types.go
@@ -0,0 +1,104 @@
+package junit
+
+import (
+ "encoding/xml"
+)
+
+// The below types are directly marshalled into XML. The types correspond to the jUnit
+// XML schema but do not contain all valid fields; attributes that do not map cleanly
+// onto Go test runs are omitted.
+// For XML specifications see http://help.catchsoftware.com/display/ET/JUnit+Format
+// or view the XSD included in this package as 'junit.xsd'
+
+// TestSuites represents a flat collection of jUnit test suites.
+type TestSuites struct {
+ XMLName xml.Name `xml:"testsuites"`
+
+ // Suites are the jUnit test suites held in this collection
+ Suites []*TestSuite `xml:"testsuite"`
+}
+
+// TestSuite represents a single jUnit test suite, potentially holding child suites.
+type TestSuite struct {
+ XMLName xml.Name `xml:"testsuite"`
+
+ // Name is the name of the test suite
+ Name string `xml:"name,attr"`
+
+ // NumTests records the number of tests in the TestSuite
+ NumTests uint `xml:"tests,attr"`
+
+ // NumSkipped records the number of skipped tests in the suite
+ NumSkipped uint `xml:"skipped,attr"`
+
+ // NumFailed records the number of failed tests in the suite
+ NumFailed uint `xml:"failures,attr"`
+
+ // Duration is the time taken in seconds to run all tests in the suite
+ Duration float64 `xml:"time,attr"`
+
+ // Properties holds other properties of the test suite as a mapping of name to value
+ Properties []*TestSuiteProperty `xml:"properties,omitempty"`
+
+ // TestCases are the test cases contained in the test suite
+ TestCases []*TestCase `xml:"testcase"`
+
+ // Children holds nested test suites
+ Children []*TestSuite `xml:"testsuite"` //nolint
+}
+
+// TestSuiteProperty contains a mapping of a property name to a value
+type TestSuiteProperty struct {
+ XMLName xml.Name `xml:"properties"`
+
+ Name string `xml:"name,attr"`
+ Value string `xml:"value,attr"`
+}
+
+// TestCase represents a jUnit test case
+type TestCase struct {
+ XMLName xml.Name `xml:"testcase"`
+
+ // Name is the name of the test case
+ Name string `xml:"name,attr"`
+
+ // Classname is an attribute set by the package type and is required
+ Classname string `xml:"classname,attr,omitempty"`
+
+ // Duration is the time taken in seconds to run the test
+ Duration float64 `xml:"time,attr"`
+
+ // SkipMessage holds the reason why the test was skipped
+ SkipMessage *SkipMessage `xml:"skipped"`
+
+ // FailureOutput holds the output from a failing test
+ FailureOutput *FailureOutput `xml:"failure"`
+
+ // SystemOut is output written to stdout during the execution of this test case
+ SystemOut string `xml:"system-out,omitempty"`
+
+ // SystemErr is output written to stderr during the execution of this test case
+ SystemErr string `xml:"system-err,omitempty"`
+}
+
+// SkipMessage holds a message explaining why a test was skipped
+type SkipMessage struct {
+ XMLName xml.Name `xml:"skipped"`
+
+ // Message explains why the test was skipped
+ Message string `xml:"message,attr,omitempty"`
+}
+
+// FailureOutput holds the output from a failing test
+type FailureOutput struct {
+ XMLName xml.Name `xml:"failure"`
+
+ // Message holds the failure message from the test
+ Message string `xml:"message,attr"`
+
+ // Output holds verbose failure output from the test
+ Output string `xml:",chardata"`
+}
+
+// TestResult is the result of a test case
+type TestResult string
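+
+// Marshalling sketch (field values are illustrative): these types round-trip through
+// encoding/xml, e.g.
+//
+//	suites := &TestSuites{Suites: []*TestSuite{{
+//		Name:      "e2e",
+//		NumTests:  1,
+//		TestCases: []*TestCase{{Name: "passes", Duration: 0.1}},
+//	}}}
+//	out, err := xml.MarshalIndent(suites, "", "  ")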
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/LICENSE b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/LICENSE
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/README.md b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/README.md
new file mode 100644
index 000000000..1a5def772
--- /dev/null
+++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/README.md
@@ -0,0 +1,3 @@
+This package is copy/pasted from [k8s.io/apimachinery](https://github.com/kubernetes/apimachinery/tree/master/pkg/util/sets)
+to avoid a circular dependency with `openshift/kubernetes`: it requires OTE, and without
+this copy OTE would in turn require `kubernetes/kubernetes`.
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/byte.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/byte.go
new file mode 100644
index 000000000..4d7a17c3a
--- /dev/null
+++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/byte.go
@@ -0,0 +1,137 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package sets
+
+// Byte is a set of bytes, implemented via map[byte]struct{} for minimal memory consumption.
+//
+// Deprecated: use generic Set instead.
+// new ways:
+// s1 := Set[byte]{}
+// s2 := New[byte]()
+type Byte map[byte]Empty
+
+// NewByte creates a Byte from a list of values.
+func NewByte(items ...byte) Byte {
+ return Byte(New[byte](items...))
+}
+
+// ByteKeySet creates a Byte from the keys of a map[byte](? extends interface{}).
+// If the value passed in is not actually a map, this will panic.
+func ByteKeySet[T any](theMap map[byte]T) Byte {
+ return Byte(KeySet(theMap))
+}
+
+// Insert adds items to the set.
+func (s Byte) Insert(items ...byte) Byte {
+ return Byte(cast(s).Insert(items...))
+}
+
+// Delete removes all items from the set.
+func (s Byte) Delete(items ...byte) Byte {
+ return Byte(cast(s).Delete(items...))
+}
+
+// Has returns true if and only if item is contained in the set.
+func (s Byte) Has(item byte) bool {
+ return cast(s).Has(item)
+}
+
+// HasAll returns true if and only if all items are contained in the set.
+func (s Byte) HasAll(items ...byte) bool {
+ return cast(s).HasAll(items...)
+}
+
+// HasAny returns true if any items are contained in the set.
+func (s Byte) HasAny(items ...byte) bool {
+ return cast(s).HasAny(items...)
+}
+
+// Clone returns a new set which is a copy of the current set.
+func (s Byte) Clone() Byte {
+ return Byte(cast(s).Clone())
+}
+
+// Difference returns a set of objects that are not in s2.
+// For example:
+// s1 = {a1, a2, a3}
+// s2 = {a1, a2, a4, a5}
+// s1.Difference(s2) = {a3}
+// s2.Difference(s1) = {a4, a5}
+func (s1 Byte) Difference(s2 Byte) Byte {
+ return Byte(cast(s1).Difference(cast(s2)))
+}
+
+// SymmetricDifference returns a set of elements which are in either of the sets, but not in their intersection.
+// For example:
+// s1 = {a1, a2, a3}
+// s2 = {a1, a2, a4, a5}
+// s1.SymmetricDifference(s2) = {a3, a4, a5}
+// s2.SymmetricDifference(s1) = {a3, a4, a5}
+func (s1 Byte) SymmetricDifference(s2 Byte) Byte {
+ return Byte(cast(s1).SymmetricDifference(cast(s2)))
+}
+
+// Union returns a new set which includes items in either s1 or s2.
+// For example:
+// s1 = {a1, a2}
+// s2 = {a3, a4}
+// s1.Union(s2) = {a1, a2, a3, a4}
+// s2.Union(s1) = {a1, a2, a3, a4}
+func (s1 Byte) Union(s2 Byte) Byte {
+ return Byte(cast(s1).Union(cast(s2)))
+}
+
+// Intersection returns a new set which includes the item in BOTH s1 and s2
+// For example:
+// s1 = {a1, a2}
+// s2 = {a2, a3}
+// s1.Intersection(s2) = {a2}
+func (s1 Byte) Intersection(s2 Byte) Byte {
+ return Byte(cast(s1).Intersection(cast(s2)))
+}
+
+// IsSuperset returns true if and only if s1 is a superset of s2.
+func (s1 Byte) IsSuperset(s2 Byte) bool {
+ return cast(s1).IsSuperset(cast(s2))
+}
+
+// Equal returns true if and only if s1 is equal (as a set) to s2.
+// Two sets are equal if their membership is identical.
+// (In practice, this means same elements, order doesn't matter)
+func (s1 Byte) Equal(s2 Byte) bool {
+ return cast(s1).Equal(cast(s2))
+}
+
+// List returns the contents as a sorted byte slice.
+func (s Byte) List() []byte {
+ return List(cast(s))
+}
+
+// UnsortedList returns the slice with contents in random order.
+func (s Byte) UnsortedList() []byte {
+ return cast(s).UnsortedList()
+}
+
+// PopAny returns a single element from the set.
+func (s Byte) PopAny() (byte, bool) {
+ return cast(s).PopAny()
+}
+
+// Len returns the size of the set.
+func (s Byte) Len() int {
+ return len(s)
+}
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/doc.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/doc.go
new file mode 100644
index 000000000..997f5e033
--- /dev/null
+++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package sets provides a generic Set type as well as type-specific sets. The generic
+// Set will replace the type-specific ones over time; the specific ones are deprecated.
+package sets // import "github.com/openshift-eng/openshift-tests-extension/pkg/util/sets"
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/empty.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/empty.go
new file mode 100644
index 000000000..fbb1df06d
--- /dev/null
+++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/empty.go
@@ -0,0 +1,21 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package sets
+
+// Empty is public since it is used by some internal API objects for conversions between external
+// string arrays and internal sets, and conversion logic requires public types today.
+type Empty struct{}
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/int.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/int.go
new file mode 100644
index 000000000..5876fc9de
--- /dev/null
+++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/int.go
@@ -0,0 +1,137 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package sets
+
+// Int is a set of ints, implemented via map[int]struct{} for minimal memory consumption.
+//
+// Deprecated: use generic Set instead.
+// new ways:
+// s1 := Set[int]{}
+// s2 := New[int]()
+type Int map[int]Empty
+
+// NewInt creates an Int from a list of values.
+func NewInt(items ...int) Int {
+ return Int(New[int](items...))
+}
+
+// IntKeySet creates an Int from the keys of a map[int](? extends interface{}).
+// If the value passed in is not actually a map, this will panic.
+func IntKeySet[T any](theMap map[int]T) Int {
+ return Int(KeySet(theMap))
+}
+
+// Insert adds items to the set.
+func (s Int) Insert(items ...int) Int {
+ return Int(cast(s).Insert(items...))
+}
+
+// Delete removes all items from the set.
+func (s Int) Delete(items ...int) Int {
+ return Int(cast(s).Delete(items...))
+}
+
+// Has returns true if and only if item is contained in the set.
+func (s Int) Has(item int) bool {
+ return cast(s).Has(item)
+}
+
+// HasAll returns true if and only if all items are contained in the set.
+func (s Int) HasAll(items ...int) bool {
+ return cast(s).HasAll(items...)
+}
+
+// HasAny returns true if any items are contained in the set.
+func (s Int) HasAny(items ...int) bool {
+ return cast(s).HasAny(items...)
+}
+
+// Clone returns a new set which is a copy of the current set.
+func (s Int) Clone() Int {
+ return Int(cast(s).Clone())
+}
+
+// Difference returns a set of objects that are not in s2.
+// For example:
+// s1 = {a1, a2, a3}
+// s2 = {a1, a2, a4, a5}
+// s1.Difference(s2) = {a3}
+// s2.Difference(s1) = {a4, a5}
+func (s1 Int) Difference(s2 Int) Int {
+ return Int(cast(s1).Difference(cast(s2)))
+}
+
+// SymmetricDifference returns a set of elements which are in either of the sets, but not in their intersection.
+// For example:
+// s1 = {a1, a2, a3}
+// s2 = {a1, a2, a4, a5}
+// s1.SymmetricDifference(s2) = {a3, a4, a5}
+// s2.SymmetricDifference(s1) = {a3, a4, a5}
+func (s1 Int) SymmetricDifference(s2 Int) Int {
+ return Int(cast(s1).SymmetricDifference(cast(s2)))
+}
+
+// Union returns a new set which includes items in either s1 or s2.
+// For example:
+// s1 = {a1, a2}
+// s2 = {a3, a4}
+// s1.Union(s2) = {a1, a2, a3, a4}
+// s2.Union(s1) = {a1, a2, a3, a4}
+func (s1 Int) Union(s2 Int) Int {
+ return Int(cast(s1).Union(cast(s2)))
+}
+
+// Intersection returns a new set which includes the item in BOTH s1 and s2
+// For example:
+// s1 = {a1, a2}
+// s2 = {a2, a3}
+// s1.Intersection(s2) = {a2}
+func (s1 Int) Intersection(s2 Int) Int {
+ return Int(cast(s1).Intersection(cast(s2)))
+}
+
+// IsSuperset returns true if and only if s1 is a superset of s2.
+func (s1 Int) IsSuperset(s2 Int) bool {
+ return cast(s1).IsSuperset(cast(s2))
+}
+
+// Equal returns true if and only if s1 is equal (as a set) to s2.
+// Two sets are equal if their membership is identical.
+// (In practice, this means same elements, order doesn't matter)
+func (s1 Int) Equal(s2 Int) bool {
+ return cast(s1).Equal(cast(s2))
+}
+
+// List returns the contents as a sorted int slice.
+func (s Int) List() []int {
+ return List(cast(s))
+}
+
+// UnsortedList returns the slice with contents in random order.
+func (s Int) UnsortedList() []int {
+ return cast(s).UnsortedList()
+}
+
+// PopAny returns a single element from the set.
+func (s Int) PopAny() (int, bool) {
+ return cast(s).PopAny()
+}
+
+// Len returns the size of the set.
+func (s Int) Len() int {
+ return len(s)
+}
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/int32.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/int32.go
new file mode 100644
index 000000000..2c640c5d0
--- /dev/null
+++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/int32.go
@@ -0,0 +1,137 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package sets
+
+// Int32 is a set of int32s, implemented via map[int32]struct{} for minimal memory consumption.
+//
+// Deprecated: use generic Set instead.
+// new ways:
+// s1 := Set[int32]{}
+// s2 := New[int32]()
+type Int32 map[int32]Empty
+
+// NewInt32 creates an Int32 from a list of values.
+func NewInt32(items ...int32) Int32 {
+ return Int32(New[int32](items...))
+}
+
+// Int32KeySet creates an Int32 from the keys of a map[int32](? extends interface{}).
+// If the value passed in is not actually a map, this will panic.
+func Int32KeySet[T any](theMap map[int32]T) Int32 {
+ return Int32(KeySet(theMap))
+}
+
+// Insert adds items to the set.
+func (s Int32) Insert(items ...int32) Int32 {
+ return Int32(cast(s).Insert(items...))
+}
+
+// Delete removes all items from the set.
+func (s Int32) Delete(items ...int32) Int32 {
+ return Int32(cast(s).Delete(items...))
+}
+
+// Has returns true if and only if item is contained in the set.
+func (s Int32) Has(item int32) bool {
+ return cast(s).Has(item)
+}
+
+// HasAll returns true if and only if all items are contained in the set.
+func (s Int32) HasAll(items ...int32) bool {
+ return cast(s).HasAll(items...)
+}
+
+// HasAny returns true if any items are contained in the set.
+func (s Int32) HasAny(items ...int32) bool {
+ return cast(s).HasAny(items...)
+}
+
+// Clone returns a new set which is a copy of the current set.
+func (s Int32) Clone() Int32 {
+ return Int32(cast(s).Clone())
+}
+
+// Difference returns a set of objects that are not in s2.
+// For example:
+// s1 = {a1, a2, a3}
+// s2 = {a1, a2, a4, a5}
+// s1.Difference(s2) = {a3}
+// s2.Difference(s1) = {a4, a5}
+func (s1 Int32) Difference(s2 Int32) Int32 {
+ return Int32(cast(s1).Difference(cast(s2)))
+}
+
+// SymmetricDifference returns a set of elements which are in either of the sets, but not in their intersection.
+// For example:
+// s1 = {a1, a2, a3}
+// s2 = {a1, a2, a4, a5}
+// s1.SymmetricDifference(s2) = {a3, a4, a5}
+// s2.SymmetricDifference(s1) = {a3, a4, a5}
+func (s1 Int32) SymmetricDifference(s2 Int32) Int32 {
+ return Int32(cast(s1).SymmetricDifference(cast(s2)))
+}
+
+// Union returns a new set which includes items in either s1 or s2.
+// For example:
+// s1 = {a1, a2}
+// s2 = {a3, a4}
+// s1.Union(s2) = {a1, a2, a3, a4}
+// s2.Union(s1) = {a1, a2, a3, a4}
+func (s1 Int32) Union(s2 Int32) Int32 {
+ return Int32(cast(s1).Union(cast(s2)))
+}
+
+// Intersection returns a new set which includes the item in BOTH s1 and s2
+// For example:
+// s1 = {a1, a2}
+// s2 = {a2, a3}
+// s1.Intersection(s2) = {a2}
+func (s1 Int32) Intersection(s2 Int32) Int32 {
+ return Int32(cast(s1).Intersection(cast(s2)))
+}
+
+// IsSuperset returns true if and only if s1 is a superset of s2.
+func (s1 Int32) IsSuperset(s2 Int32) bool {
+ return cast(s1).IsSuperset(cast(s2))
+}
+
+// Equal returns true if and only if s1 is equal (as a set) to s2.
+// Two sets are equal if their membership is identical.
+// (In practice, this means same elements, order doesn't matter)
+func (s1 Int32) Equal(s2 Int32) bool {
+ return cast(s1).Equal(cast(s2))
+}
+
+// List returns the contents as a sorted int32 slice.
+func (s Int32) List() []int32 {
+ return List(cast(s))
+}
+
+// UnsortedList returns the slice with contents in random order.
+func (s Int32) UnsortedList() []int32 {
+ return cast(s).UnsortedList()
+}
+
+// PopAny returns a single element from the set.
+func (s Int32) PopAny() (int32, bool) {
+ return cast(s).PopAny()
+}
+
+// Len returns the size of the set.
+func (s Int32) Len() int {
+ return len(s)
+}
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/int64.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/int64.go
new file mode 100644
index 000000000..bf3eb3ffa
--- /dev/null
+++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/int64.go
@@ -0,0 +1,137 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package sets
+
+// Int64 is a set of int64s, implemented via map[int64]struct{} for minimal memory consumption.
+//
+// Deprecated: use generic Set instead.
+// new ways:
+// s1 := Set[int64]{}
+// s2 := New[int64]()
+type Int64 map[int64]Empty
+
+// NewInt64 creates an Int64 from a list of values.
+func NewInt64(items ...int64) Int64 {
+ return Int64(New[int64](items...))
+}
+
+// Int64KeySet creates an Int64 from the keys of a map[int64](? extends interface{}).
+// If the value passed in is not actually a map, this will panic.
+func Int64KeySet[T any](theMap map[int64]T) Int64 {
+ return Int64(KeySet(theMap))
+}
+
+// Insert adds items to the set.
+func (s Int64) Insert(items ...int64) Int64 {
+ return Int64(cast(s).Insert(items...))
+}
+
+// Delete removes all items from the set.
+func (s Int64) Delete(items ...int64) Int64 {
+ return Int64(cast(s).Delete(items...))
+}
+
+// Has returns true if and only if item is contained in the set.
+func (s Int64) Has(item int64) bool {
+ return cast(s).Has(item)
+}
+
+// HasAll returns true if and only if all items are contained in the set.
+func (s Int64) HasAll(items ...int64) bool {
+ return cast(s).HasAll(items...)
+}
+
+// HasAny returns true if any items are contained in the set.
+func (s Int64) HasAny(items ...int64) bool {
+ return cast(s).HasAny(items...)
+}
+
+// Clone returns a new set which is a copy of the current set.
+func (s Int64) Clone() Int64 {
+ return Int64(cast(s).Clone())
+}
+
+// Difference returns a set of objects that are not in s2.
+// For example:
+// s1 = {a1, a2, a3}
+// s2 = {a1, a2, a4, a5}
+// s1.Difference(s2) = {a3}
+// s2.Difference(s1) = {a4, a5}
+func (s1 Int64) Difference(s2 Int64) Int64 {
+ return Int64(cast(s1).Difference(cast(s2)))
+}
+
+// SymmetricDifference returns a set of elements which are in either of the sets, but not in their intersection.
+// For example:
+// s1 = {a1, a2, a3}
+// s2 = {a1, a2, a4, a5}
+// s1.SymmetricDifference(s2) = {a3, a4, a5}
+// s2.SymmetricDifference(s1) = {a3, a4, a5}
+func (s1 Int64) SymmetricDifference(s2 Int64) Int64 {
+ return Int64(cast(s1).SymmetricDifference(cast(s2)))
+}
+
+// Union returns a new set which includes items in either s1 or s2.
+// For example:
+// s1 = {a1, a2}
+// s2 = {a3, a4}
+// s1.Union(s2) = {a1, a2, a3, a4}
+// s2.Union(s1) = {a1, a2, a3, a4}
+func (s1 Int64) Union(s2 Int64) Int64 {
+ return Int64(cast(s1).Union(cast(s2)))
+}
+
+// Intersection returns a new set which includes the item in BOTH s1 and s2
+// For example:
+// s1 = {a1, a2}
+// s2 = {a2, a3}
+// s1.Intersection(s2) = {a2}
+func (s1 Int64) Intersection(s2 Int64) Int64 {
+ return Int64(cast(s1).Intersection(cast(s2)))
+}
+
+// IsSuperset returns true if and only if s1 is a superset of s2.
+func (s1 Int64) IsSuperset(s2 Int64) bool {
+ return cast(s1).IsSuperset(cast(s2))
+}
+
+// Equal returns true if and only if s1 is equal (as a set) to s2.
+// Two sets are equal if their membership is identical.
+// (In practice, this means same elements, order doesn't matter)
+func (s1 Int64) Equal(s2 Int64) bool {
+ return cast(s1).Equal(cast(s2))
+}
+
+// List returns the contents as a sorted int64 slice.
+func (s Int64) List() []int64 {
+ return List(cast(s))
+}
+
+// UnsortedList returns the slice with contents in random order.
+func (s Int64) UnsortedList() []int64 {
+ return cast(s).UnsortedList()
+}
+
+// PopAny returns a single element from the set.
+func (s Int64) PopAny() (int64, bool) {
+ return cast(s).PopAny()
+}
+
+// Len returns the size of the set.
+func (s Int64) Len() int {
+ return len(s)
+}
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/set.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/set.go
new file mode 100644
index 000000000..cd961c8c5
--- /dev/null
+++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/set.go
@@ -0,0 +1,236 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package sets
+
+import (
+ "cmp"
+ "sort"
+)
+
+// Set is a set of the same type elements, implemented via map[comparable]struct{} for minimal memory consumption.
+type Set[T comparable] map[T]Empty
+
+// cast transforms specified set to generic Set[T].
+func cast[T comparable](s map[T]Empty) Set[T] { return s }
+
+// New creates a Set from a list of values.
+// NOTE: type param must be explicitly instantiated if given items are empty.
+func New[T comparable](items ...T) Set[T] {
+ ss := make(Set[T], len(items))
+ ss.Insert(items...)
+ return ss
+}
+
+// KeySet creates a Set from the keys of a map[comparable](? extends interface{}).
+// If the value passed in is not actually a map, this will panic.
+func KeySet[T comparable, V any](theMap map[T]V) Set[T] {
+ ret := make(Set[T], len(theMap))
+ for keyValue := range theMap {
+ ret.Insert(keyValue)
+ }
+ return ret
+}
+
+// Insert adds items to the set.
+func (s Set[T]) Insert(items ...T) Set[T] {
+ for _, item := range items {
+ s[item] = Empty{}
+ }
+ return s
+}
+
+func Insert[T comparable](set Set[T], items ...T) Set[T] {
+ return set.Insert(items...)
+}
+
+// Delete removes all items from the set.
+func (s Set[T]) Delete(items ...T) Set[T] {
+ for _, item := range items {
+ delete(s, item)
+ }
+ return s
+}
+
+// Clear empties the set.
+// It is preferable to replace the set with a newly constructed set,
+// but not all callers can do that (when there are other references to the map).
+func (s Set[T]) Clear() Set[T] {
+ clear(s)
+ return s
+}
+
+// Has returns true if and only if item is contained in the set.
+func (s Set[T]) Has(item T) bool {
+ _, contained := s[item]
+ return contained
+}
+
+// HasAll returns true if and only if all items are contained in the set.
+func (s Set[T]) HasAll(items ...T) bool {
+ for _, item := range items {
+ if !s.Has(item) {
+ return false
+ }
+ }
+ return true
+}
+
+// HasAny returns true if any items are contained in the set.
+func (s Set[T]) HasAny(items ...T) bool {
+ for _, item := range items {
+ if s.Has(item) {
+ return true
+ }
+ }
+ return false
+}
+
+// Clone returns a new set which is a copy of the current set.
+func (s Set[T]) Clone() Set[T] {
+ result := make(Set[T], len(s))
+ for key := range s {
+ result.Insert(key)
+ }
+ return result
+}
+
+// Difference returns a set of objects that are not in s2.
+// For example:
+// s1 = {a1, a2, a3}
+// s2 = {a1, a2, a4, a5}
+// s1.Difference(s2) = {a3}
+// s2.Difference(s1) = {a4, a5}
+func (s1 Set[T]) Difference(s2 Set[T]) Set[T] {
+ result := New[T]()
+ for key := range s1 {
+ if !s2.Has(key) {
+ result.Insert(key)
+ }
+ }
+ return result
+}
+
+// SymmetricDifference returns a set of elements which are in either of the sets, but not in their intersection.
+// For example:
+// s1 = {a1, a2, a3}
+// s2 = {a1, a2, a4, a5}
+// s1.SymmetricDifference(s2) = {a3, a4, a5}
+// s2.SymmetricDifference(s1) = {a3, a4, a5}
+func (s1 Set[T]) SymmetricDifference(s2 Set[T]) Set[T] {
+ return s1.Difference(s2).Union(s2.Difference(s1))
+}
+
+// Union returns a new set which includes items in either s1 or s2.
+// For example:
+// s1 = {a1, a2}
+// s2 = {a3, a4}
+// s1.Union(s2) = {a1, a2, a3, a4}
+// s2.Union(s1) = {a1, a2, a3, a4}
+func (s1 Set[T]) Union(s2 Set[T]) Set[T] {
+ result := s1.Clone()
+ for key := range s2 {
+ result.Insert(key)
+ }
+ return result
+}
+
+// Intersection returns a new set which includes the item in BOTH s1 and s2
+// For example:
+// s1 = {a1, a2}
+// s2 = {a2, a3}
+// s1.Intersection(s2) = {a2}
+func (s1 Set[T]) Intersection(s2 Set[T]) Set[T] {
+ var walk, other Set[T]
+ result := New[T]()
+ if s1.Len() < s2.Len() {
+ walk = s1
+ other = s2
+ } else {
+ walk = s2
+ other = s1
+ }
+ for key := range walk {
+ if other.Has(key) {
+ result.Insert(key)
+ }
+ }
+ return result
+}
+
+// IsSuperset returns true if and only if s1 is a superset of s2.
+func (s1 Set[T]) IsSuperset(s2 Set[T]) bool {
+ for item := range s2 {
+ if !s1.Has(item) {
+ return false
+ }
+ }
+ return true
+}
+
+// Equal returns true if and only if s1 is equal (as a set) to s2.
+// Two sets are equal if their membership is identical.
+// (In practice, this means same elements, order doesn't matter)
+func (s1 Set[T]) Equal(s2 Set[T]) bool {
+ return len(s1) == len(s2) && s1.IsSuperset(s2)
+}
+
+type sortableSliceOfGeneric[T cmp.Ordered] []T
+
+func (g sortableSliceOfGeneric[T]) Len() int { return len(g) }
+func (g sortableSliceOfGeneric[T]) Less(i, j int) bool { return less[T](g[i], g[j]) }
+func (g sortableSliceOfGeneric[T]) Swap(i, j int) { g[i], g[j] = g[j], g[i] }
+
+// List returns the contents as a sorted T slice.
+//
+// This is a separate function and not a method because not all types supported
+// by Generic are ordered and only those can be sorted.
+func List[T cmp.Ordered](s Set[T]) []T {
+ res := make(sortableSliceOfGeneric[T], 0, len(s))
+ for key := range s {
+ res = append(res, key)
+ }
+ sort.Sort(res)
+ return res
+}
+
+// UnsortedList returns the slice with contents in random order.
+func (s Set[T]) UnsortedList() []T {
+ res := make([]T, 0, len(s))
+ for key := range s {
+ res = append(res, key)
+ }
+ return res
+}
+
+// PopAny returns a single element from the set.
+func (s Set[T]) PopAny() (T, bool) {
+ for key := range s {
+ s.Delete(key)
+ return key, true
+ }
+ var zeroValue T
+ return zeroValue, false
+}
+
+// Len returns the size of the set.
+func (s Set[T]) Len() int {
+ return len(s)
+}
+
+func less[T cmp.Ordered](lhs, rhs T) bool {
+ return lhs < rhs
+}
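+
+// Usage sketch for the generic Set:
+//
+//	s := New[string]("b", "a")
+//	s.Insert("c")
+//	_ = List(s) // []string{"a", "b", "c"}, sorted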
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/string.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/string.go
new file mode 100644
index 000000000..1dab6d13c
--- /dev/null
+++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/string.go
@@ -0,0 +1,137 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package sets
+
+// String is a set of strings, implemented via map[string]struct{} for minimal memory consumption.
+//
+// Deprecated: use generic Set instead.
+// new ways:
+// s1 := Set[string]{}
+// s2 := New[string]()
+type String map[string]Empty
+
+// NewString creates a String from a list of values.
+func NewString(items ...string) String {
+ return String(New[string](items...))
+}
+
+// StringKeySet creates a String from the keys of a map[string](? extends interface{}).
+// If the value passed in is not actually a map, this will panic.
+func StringKeySet[T any](theMap map[string]T) String {
+ return String(KeySet(theMap))
+}
+
+// Insert adds items to the set.
+func (s String) Insert(items ...string) String {
+ return String(cast(s).Insert(items...))
+}
+
+// Delete removes all items from the set.
+func (s String) Delete(items ...string) String {
+ return String(cast(s).Delete(items...))
+}
+
+// Has returns true if and only if item is contained in the set.
+func (s String) Has(item string) bool {
+ return cast(s).Has(item)
+}
+
+// HasAll returns true if and only if all items are contained in the set.
+func (s String) HasAll(items ...string) bool {
+ return cast(s).HasAll(items...)
+}
+
+// HasAny returns true if any items are contained in the set.
+func (s String) HasAny(items ...string) bool {
+ return cast(s).HasAny(items...)
+}
+
+// Clone returns a new set which is a copy of the current set.
+func (s String) Clone() String {
+ return String(cast(s).Clone())
+}
+
+// Difference returns a set of objects that are not in s2.
+// For example:
+// s1 = {a1, a2, a3}
+// s2 = {a1, a2, a4, a5}
+// s1.Difference(s2) = {a3}
+// s2.Difference(s1) = {a4, a5}
+func (s1 String) Difference(s2 String) String {
+ return String(cast(s1).Difference(cast(s2)))
+}
+
+// SymmetricDifference returns a set of elements which are in either of the sets, but not in their intersection.
+// For example:
+// s1 = {a1, a2, a3}
+// s2 = {a1, a2, a4, a5}
+// s1.SymmetricDifference(s2) = {a3, a4, a5}
+// s2.SymmetricDifference(s1) = {a3, a4, a5}
+func (s1 String) SymmetricDifference(s2 String) String {
+ return String(cast(s1).SymmetricDifference(cast(s2)))
+}
+
+// Union returns a new set which includes items in either s1 or s2.
+// For example:
+// s1 = {a1, a2}
+// s2 = {a3, a4}
+// s1.Union(s2) = {a1, a2, a3, a4}
+// s2.Union(s1) = {a1, a2, a3, a4}
+func (s1 String) Union(s2 String) String {
+ return String(cast(s1).Union(cast(s2)))
+}
+
+// Intersection returns a new set which includes items in BOTH s1 and s2.
+// For example:
+// s1 = {a1, a2}
+// s2 = {a2, a3}
+// s1.Intersection(s2) = {a2}
+func (s1 String) Intersection(s2 String) String {
+ return String(cast(s1).Intersection(cast(s2)))
+}
+
+// IsSuperset returns true if and only if s1 is a superset of s2.
+func (s1 String) IsSuperset(s2 String) bool {
+ return cast(s1).IsSuperset(cast(s2))
+}
+
+// Equal returns true if and only if s1 is equal (as a set) to s2.
+// Two sets are equal if their membership is identical.
+// (In practice, this means same elements, order doesn't matter)
+func (s1 String) Equal(s2 String) bool {
+ return cast(s1).Equal(cast(s2))
+}
+
+// List returns the contents as a sorted string slice.
+func (s String) List() []string {
+ return List(cast(s))
+}
+
+// UnsortedList returns the slice with contents in random order.
+func (s String) UnsortedList() []string {
+ return cast(s).UnsortedList()
+}
+
+// PopAny returns a single element from the set.
+func (s String) PopAny() (string, bool) {
+ return cast(s).PopAny()
+}
+
+// Len returns the size of the set.
+func (s String) Len() int {
+ return len(s)
+}
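
The deprecated `String` wrapper mirrors the generic `Set`; a short sketch of the set operations documented above, using only functions defined in this file:

```go
package main

import (
	"fmt"

	"github.com/openshift-eng/openshift-tests-extension/pkg/util/sets"
)

func main() {
	s1 := sets.NewString("a1", "a2", "a3")
	s2 := sets.NewString("a1", "a2", "a4", "a5")

	fmt.Println(s1.Difference(s2).List())          // [a3]
	fmt.Println(s1.Intersection(s2).List())        // [a1 a2]
	fmt.Println(s1.SymmetricDifference(s2).List()) // [a3 a4 a5]
	fmt.Println(s1.Union(s2).Equal(s2.Union(s1)))  // true: union is commutative
}
```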
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/version/version.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/version/version.go
new file mode 100644
index 000000000..7d6a3309b
--- /dev/null
+++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/version/version.go
@@ -0,0 +1,11 @@
+package version
+
+var (
+ // CommitFromGit is a constant representing the source version that
+ // generated this build. It should be set during build via -ldflags.
+ CommitFromGit string
+ // BuildDate in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ')
+ BuildDate string
+ // GitTreeState has the state of git tree, either "clean" or "dirty"
+ GitTreeState string
+)
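
These variables are plain strings meant to be stamped at build time with `-ldflags -X`; a minimal sketch of how a consumer might inject and read them (the `main` package and the build command are illustrative, not part of this change):

```go
package main

import (
	"fmt"

	"github.com/openshift-eng/openshift-tests-extension/pkg/version"
)

// Stamped at build time, e.g. (illustrative command):
//
//	go build -ldflags "\
//	  -X github.com/openshift-eng/openshift-tests-extension/pkg/version.CommitFromGit=$(git rev-parse HEAD) \
//	  -X github.com/openshift-eng/openshift-tests-extension/pkg/version.BuildDate=$(date -u +'%Y-%m-%dT%H:%M:%SZ')"
func main() {
	fmt.Printf("commit=%s date=%s tree=%s\n",
		version.CommitFromGit, version.BuildDate, version.GitTreeState)
}
```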
diff --git a/vendor/github.com/spf13/cobra/.gitignore b/vendor/github.com/spf13/cobra/.gitignore
new file mode 100644
index 000000000..c7b459e4d
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/.gitignore
@@ -0,0 +1,39 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+# Vim files https://github.com/github/gitignore/blob/master/Global/Vim.gitignore
+# swap
+[._]*.s[a-w][a-z]
+[._]s[a-w][a-z]
+# session
+Session.vim
+# temporary
+.netrwhist
+*~
+# auto-generated tag files
+tags
+
+*.exe
+cobra.test
+bin
+
+.idea/
+*.iml
diff --git a/vendor/github.com/spf13/cobra/.golangci.yml b/vendor/github.com/spf13/cobra/.golangci.yml
new file mode 100644
index 000000000..2c8f4808c
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/.golangci.yml
@@ -0,0 +1,57 @@
+# Copyright 2013-2023 The Cobra Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+run:
+ deadline: 5m
+
+linters:
+ disable-all: true
+ enable:
+ #- bodyclose
+ # - deadcode ! deprecated since v1.49.0; replaced by 'unused'
+ #- depguard
+ #- dogsled
+ #- dupl
+ - errcheck
+ #- exhaustive
+ #- funlen
+ #- gochecknoinits
+ - goconst
+ - gocritic
+ #- gocyclo
+ - gofmt
+ - goimports
+ #- gomnd
+ #- goprintffuncname
+ - gosec
+ - gosimple
+ - govet
+ - ineffassign
+ #- lll
+ - misspell
+ #- nakedret
+ #- noctx
+ - nolintlint
+ #- rowserrcheck
+ #- scopelint
+ - staticcheck
+ #- structcheck ! deprecated since v1.49.0; replaced by 'unused'
+ - stylecheck
+ #- typecheck
+ - unconvert
+ #- unparam
+ - unused
+ # - varcheck ! deprecated since v1.49.0; replaced by 'unused'
+ #- whitespace
+ fast: false
diff --git a/vendor/github.com/spf13/cobra/.mailmap b/vendor/github.com/spf13/cobra/.mailmap
new file mode 100644
index 000000000..94ec53068
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/.mailmap
@@ -0,0 +1,3 @@
+Steve Francia
+Bjørn Erik Pedersen
+Fabiano Franz
diff --git a/vendor/github.com/spf13/cobra/CONDUCT.md b/vendor/github.com/spf13/cobra/CONDUCT.md
new file mode 100644
index 000000000..9d16f88fd
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/CONDUCT.md
@@ -0,0 +1,37 @@
+## Cobra User Contract
+
+### Versioning
+Cobra will follow a steady release cadence. Non-breaking changes will be released as minor versions quarterly. Patch bug releases are at the discretion of the maintainers. Users can expect security patch fixes to be released within relatively short order of a CVE becoming known. For more information on security patch fixes see the CVE section below. Releases will follow [Semantic Versioning](https://semver.org/). Users tracking the Master branch should expect unpredictable breaking changes as the project continues to move forward. For stability, it is highly recommended to use a release.
+
+### Backward Compatibility
+We will maintain two major releases in a moving window. The N-1 release will only receive bug fixes and security updates and will be dropped once N+1 is released.
+
+### Deprecation
+Deprecation of Go versions or dependent packages will only occur in major releases. To reduce the chance of this taking users by surprise, any large deprecation will be preceded by an announcement in the [#cobra slack channel](https://gophers.slack.com/archives/CD3LP1199) and an Issue on Github.
+
+### CVE
+Maintainers will make every effort to release security patches in the case of a medium to high severity CVE directly impacting the library. The speed at which these patches reach a release is up to the discretion of the maintainers. A low severity CVE may be a lower priority than a high severity one.
+
+### Communication
+Cobra maintainers will use GitHub issues and the [#cobra slack channel](https://gophers.slack.com/archives/CD3LP1199) as the primary means of communication with the community. This is to foster open communication with all users and contributors.
+
+### Breaking Changes
+Breaking changes are generally allowed in the master branch, as this is the branch used to develop the next release of Cobra.
+
+There may be times, however, when master is closed for breaking changes. This is likely to happen as we near the release of a new version.
+
+Breaking changes are not allowed in release branches, as these represent minor versions that have already been released. These versions have consumers who expect the APIs, behaviors, etc., to remain stable during the lifetime of the patch stream for the minor release.
+
+Examples of breaking changes include:
+- Removing or renaming an exported constant, variable, type, or function.
+- Updating the version of critical libraries such as `spf13/pflag`, `spf13/viper` etc...
+ - Some version updates may be acceptable for picking up bug fixes, but maintainers must exercise caution when reviewing.
+
+There may, at times, be a need for exceptions where breaking changes are allowed in release branches. These are at the discretion of the project's maintainers, and must be carefully considered before merging.
+
+### CI Testing
+Maintainers will ensure the Cobra test suite utilizes the currently supported versions of Go.
+
+### Disclaimer
+Changes to this document and the contents therein are at the discretion of the maintainers.
+None of the contents of this document are legally binding in any way to the maintainers or the users.
diff --git a/vendor/github.com/spf13/cobra/CONTRIBUTING.md b/vendor/github.com/spf13/cobra/CONTRIBUTING.md
new file mode 100644
index 000000000..6f356e6a8
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/CONTRIBUTING.md
@@ -0,0 +1,50 @@
+# Contributing to Cobra
+
+Thank you so much for contributing to Cobra. We appreciate your time and help.
+Here are some guidelines to help you get started.
+
+## Code of Conduct
+
+Be kind and respectful to the members of the community. Take time to educate
+others who are seeking help. Harassment of any kind will not be tolerated.
+
+## Questions
+
+If you have questions regarding Cobra, feel free to ask them in the community
+[#cobra Slack channel][cobra-slack].
+
+## Filing a bug or feature
+
+1. Before filing an issue, please check the existing issues to see if a
+ similar one was already opened. If there is one already opened, feel free
+ to comment on it.
+1. If you believe you've found a bug, please provide detailed steps of
+ reproduction, the version of Cobra and anything else you believe will be
+ useful to help troubleshoot it (e.g. OS environment, environment variables,
+ etc...). Also state the current behavior vs. the expected behavior.
+1. If you'd like to see a feature or an enhancement please open an issue with
+ a clear title and description of what the feature is and why it would be
+ beneficial to the project and its users.
+
+## Submitting changes
+
+1. CLA: Upon submitting a Pull Request (PR), contributors will be prompted to
+ sign a CLA. Please sign the CLA :slightly_smiling_face:
+1. Tests: If you are submitting code, please ensure you have adequate tests
+ for the feature. Tests can be run via `go test ./...` or `make test`.
+1. Since this is a Go project, ensure the new code is properly formatted to
+   keep the codebase consistent. Run `make all`.
+
+### Quick steps to contribute
+
+1. Fork the project.
+1. Download your fork to your PC (`git clone https://github.com/your_username/cobra && cd cobra`)
+1. Create your feature branch (`git checkout -b my-new-feature`)
+1. Make changes and run tests (`make test`)
+1. Add them to staging (`git add .`)
+1. Commit your changes (`git commit -m 'Add some feature'`)
+1. Push to the branch (`git push origin my-new-feature`)
+1. Create new pull request
+
+
+[cobra-slack]: https://gophers.slack.com/archives/CD3LP1199
diff --git a/vendor/github.com/spf13/cobra/LICENSE.txt b/vendor/github.com/spf13/cobra/LICENSE.txt
new file mode 100644
index 000000000..298f0e266
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/LICENSE.txt
@@ -0,0 +1,174 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
diff --git a/vendor/github.com/spf13/cobra/MAINTAINERS b/vendor/github.com/spf13/cobra/MAINTAINERS
new file mode 100644
index 000000000..4c5ac3dd9
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/MAINTAINERS
@@ -0,0 +1,13 @@
+maintainers:
+- spf13
+- johnSchnake
+- jpmcb
+- marckhouzam
+inactive:
+- anthonyfok
+- bep
+- bogem
+- broady
+- eparis
+- jharshman
+- wfernandes
diff --git a/vendor/github.com/spf13/cobra/Makefile b/vendor/github.com/spf13/cobra/Makefile
new file mode 100644
index 000000000..0da8d7aa0
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/Makefile
@@ -0,0 +1,35 @@
+BIN="./bin"
+SRC=$(shell find . -name "*.go")
+
+ifeq (, $(shell which golangci-lint))
+$(warning "could not find golangci-lint in $(PATH), run: curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh")
+endif
+
+.PHONY: fmt lint test install_deps clean
+
+default: all
+
+all: fmt test
+
+fmt:
+ $(info ******************** checking formatting ********************)
+ @test -z $(shell gofmt -l $(SRC)) || (gofmt -d $(SRC); exit 1)
+
+lint:
+ $(info ******************** running lint tools ********************)
+ golangci-lint run -v
+
+test: install_deps
+ $(info ******************** running tests ********************)
+ go test -v ./...
+
+richtest: install_deps
+ $(info ******************** running tests with kyoh86/richgo ********************)
+ richgo test -v ./...
+
+install_deps:
+ $(info ******************** downloading dependencies ********************)
+ go get -v ./...
+
+clean:
+ rm -rf $(BIN)
diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md
new file mode 100644
index 000000000..6444f4b7f
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/README.md
@@ -0,0 +1,112 @@
+<!-- Cobra logo image -->
+
+Cobra is a library for creating powerful modern CLI applications.
+
+Cobra is used in many Go projects such as [Kubernetes](https://kubernetes.io/),
+[Hugo](https://gohugo.io), and [GitHub CLI](https://github.com/cli/cli) to
+name a few. [This list](site/content/projects_using_cobra.md) contains a more extensive list of projects using Cobra.
+
+[Test workflow](https://github.com/spf13/cobra/actions?query=workflow%3ATest)
+[Go Reference](https://pkg.go.dev/github.com/spf13/cobra)
+[Go Report Card](https://goreportcard.com/report/github.com/spf13/cobra)
+[Slack](https://gophers.slack.com/archives/CD3LP1199)
+
+# Overview
+
+Cobra is a library providing a simple interface to create powerful modern CLI
+interfaces similar to git & go tools.
+
+Cobra provides:
+* Easy subcommand-based CLIs: `app server`, `app fetch`, etc.
+* Fully POSIX-compliant flags (including short & long versions)
+* Nested subcommands
+* Global, local and cascading flags
+* Intelligent suggestions (`app srver`... did you mean `app server`?)
+* Automatic help generation for commands and flags
+* Grouping help for subcommands
+* Automatic help flag recognition of `-h`, `--help`, etc.
+* Automatically generated shell autocomplete for your application (bash, zsh, fish, powershell)
+* Automatically generated man pages for your application
+* Command aliases so you can change things without breaking them
+* The flexibility to define your own help, usage, etc.
+* Optional seamless integration with [viper](https://github.com/spf13/viper) for 12-factor apps
+
+# Concepts
+
+Cobra is built on a structure of commands, arguments & flags.
+
+**Commands** represent actions, **Args** are things and **Flags** are modifiers for those actions.
+
+The best applications read like sentences when used, and as a result, users
+intuitively know how to interact with them.
+
+The pattern to follow is
+`APPNAME VERB NOUN --ADJECTIVE`
+ or
+`APPNAME COMMAND ARG --FLAG`.
+
+A few good real world examples may better illustrate this point.
+
+In the following example, 'server' is a command, and 'port' is a flag:
+
+ hugo server --port=1313
+
+In this command, we are telling Git to clone the URL bare:
+
+ git clone URL --bare
+
+## Commands
+
+Command is the central point of the application. Each interaction that
+the application supports will be contained in a Command. A command can
+have children commands and optionally run an action.
+
+In the example above, 'server' is the command.
+
+[More about cobra.Command](https://pkg.go.dev/github.com/spf13/cobra#Command)
+
+## Flags
+
+A flag is a way to modify the behavior of a command. Cobra supports
+fully POSIX-compliant flags as well as the Go [flag package](https://golang.org/pkg/flag/).
+A Cobra command can define flags that persist through to children commands
+and flags that are only available to that command.
+
+In the example above, 'port' is the flag.
+
+Flag functionality is provided by the [pflag
+library](https://github.com/spf13/pflag), a fork of the flag standard library
+which maintains the same interface while adding POSIX compliance.
+
+# Installing
+Using Cobra is easy. First, use `go get` to install the latest version
+of the library.
+
+```
+go get -u github.com/spf13/cobra@latest
+```
+
+Next, include Cobra in your application:
+
+```go
+import "github.com/spf13/cobra"
+```
+
+# Usage
+`cobra-cli` is a command line program to generate cobra applications and command files.
+It will bootstrap your application scaffolding to rapidly
+develop a Cobra-based application. It is the easiest way to incorporate Cobra into your application.
+
+It can be installed by running:
+
+```
+go install github.com/spf13/cobra-cli@latest
+```
+
+For complete details on using the Cobra-CLI generator, please read [The Cobra Generator README](https://github.com/spf13/cobra-cli/blob/main/README.md)
+
+For complete details on using the Cobra library, please read [The Cobra User Guide](site/content/user_guide.md).
+
+# License
+
+Cobra is released under the Apache 2.0 license. See [LICENSE.txt](LICENSE.txt)
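
To make the README's `hugo server --port=1313` pattern concrete, a minimal sketch of a Cobra application with one subcommand and one flag (the `app`/`server` names are illustrative, not part of this change):

```go
package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

func main() {
	rootCmd := &cobra.Command{Use: "app", Short: "An example CLI"}

	var port int
	serverCmd := &cobra.Command{
		Use:   "server",
		Short: "Start the server",
		Run: func(cmd *cobra.Command, args []string) {
			fmt.Println("serving on port", port) // e.g. `app server --port=1313`
		},
	}
	serverCmd.Flags().IntVar(&port, "port", 1313, "port to listen on")

	rootCmd.AddCommand(serverCmd)
	if err := rootCmd.Execute(); err != nil {
		os.Exit(1)
	}
}
```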
diff --git a/vendor/github.com/spf13/cobra/active_help.go b/vendor/github.com/spf13/cobra/active_help.go
new file mode 100644
index 000000000..25c30e3cc
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/active_help.go
@@ -0,0 +1,60 @@
+// Copyright 2013-2023 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cobra
+
+import (
+ "fmt"
+ "os"
+)
+
+const (
+ activeHelpMarker = "_activeHelp_ "
+ // The below values should not be changed: programs will be using them explicitly
+ // in their user documentation, and users will be using them explicitly.
+ activeHelpEnvVarSuffix = "ACTIVE_HELP"
+ activeHelpGlobalEnvVar = configEnvVarGlobalPrefix + "_" + activeHelpEnvVarSuffix
+ activeHelpGlobalDisable = "0"
+)
+
+// AppendActiveHelp adds the specified string to the specified array to be used as ActiveHelp.
+// Such strings will be processed by the completion script and will be shown as ActiveHelp
+// to the user.
+// The array parameter should be the array that will contain the completions.
+// This function can be called multiple times before and/or after completions are added to
+// the array. Each time this function is called with the same array, the new
+// ActiveHelp line will be shown below the previous ones when completion is triggered.
+func AppendActiveHelp(compArray []string, activeHelpStr string) []string {
+ return append(compArray, fmt.Sprintf("%s%s", activeHelpMarker, activeHelpStr))
+}
+
+// GetActiveHelpConfig returns the value of the ActiveHelp environment variable
+// <PROGRAM>_ACTIVE_HELP where <PROGRAM> is the name of the root command in upper
+// case, with all non-ASCII-alphanumeric characters replaced by `_`.
+// It will always return "0" if the global environment variable COBRA_ACTIVE_HELP
+// is set to "0".
+func GetActiveHelpConfig(cmd *Command) string {
+ activeHelpCfg := os.Getenv(activeHelpGlobalEnvVar)
+ if activeHelpCfg != activeHelpGlobalDisable {
+ activeHelpCfg = os.Getenv(activeHelpEnvVar(cmd.Root().Name()))
+ }
+ return activeHelpCfg
+}
+
+// activeHelpEnvVar returns the name of the program-specific ActiveHelp environment
+// variable. It has the format <PROGRAM>_ACTIVE_HELP where <PROGRAM> is the name of the
+// root command in upper case, with all non-ASCII-alphanumeric characters replaced by `_`.
+func activeHelpEnvVar(name string) string {
+ return configEnvVar(name, activeHelpEnvVarSuffix)
+}
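
A short sketch of how `AppendActiveHelp` is typically wired into a command's `ValidArgsFunction` (the `deploy` command and its environments are hypothetical):

```go
package main

import (
	"os"

	"github.com/spf13/cobra"
)

func main() {
	rootCmd := &cobra.Command{Use: "app"}
	deployCmd := &cobra.Command{
		Use:  "deploy <environment>",
		Args: cobra.ExactArgs(1),
		Run:  func(cmd *cobra.Command, args []string) {},
		ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
			if len(args) != 0 {
				// Rendered by the completion script as active help text,
				// not inserted on the command line as a completion.
				return cobra.AppendActiveHelp(nil, "This command takes only one argument"),
					cobra.ShellCompDirectiveNoFileComp
			}
			return []string{"staging", "production"}, cobra.ShellCompDirectiveNoFileComp
		},
	}
	rootCmd.AddCommand(deployCmd)
	if err := rootCmd.Execute(); err != nil {
		os.Exit(1)
	}
}
```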
diff --git a/vendor/github.com/spf13/cobra/args.go b/vendor/github.com/spf13/cobra/args.go
new file mode 100644
index 000000000..ed1e70cea
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/args.go
@@ -0,0 +1,131 @@
+// Copyright 2013-2023 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cobra
+
+import (
+ "fmt"
+ "strings"
+)
+
+type PositionalArgs func(cmd *Command, args []string) error
+
+// legacyArgs validation has the following behaviour:
+// - root commands with no subcommands can take arbitrary arguments
+// - root commands with subcommands will do subcommand validity checking
+// - subcommands will always accept arbitrary arguments
+func legacyArgs(cmd *Command, args []string) error {
+ // no subcommand, always take args
+ if !cmd.HasSubCommands() {
+ return nil
+ }
+
+ // root command with subcommands, do subcommand checking.
+ if !cmd.HasParent() && len(args) > 0 {
+ return fmt.Errorf("unknown command %q for %q%s", args[0], cmd.CommandPath(), cmd.findSuggestions(args[0]))
+ }
+ return nil
+}
+
+// NoArgs returns an error if any args are included.
+func NoArgs(cmd *Command, args []string) error {
+ if len(args) > 0 {
+ return fmt.Errorf("unknown command %q for %q", args[0], cmd.CommandPath())
+ }
+ return nil
+}
+
+// OnlyValidArgs returns an error if there are any positional args that are not in
+// the `ValidArgs` field of `Command`
+func OnlyValidArgs(cmd *Command, args []string) error {
+ if len(cmd.ValidArgs) > 0 {
+ // Remove any description that may be included in ValidArgs.
+ // A description follows a tab character.
+ validArgs := make([]string, 0, len(cmd.ValidArgs))
+ for _, v := range cmd.ValidArgs {
+ validArgs = append(validArgs, strings.SplitN(v, "\t", 2)[0])
+ }
+ for _, v := range args {
+ if !stringInSlice(v, validArgs) {
+ return fmt.Errorf("invalid argument %q for %q%s", v, cmd.CommandPath(), cmd.findSuggestions(args[0]))
+ }
+ }
+ }
+ return nil
+}
+
+// ArbitraryArgs never returns an error.
+func ArbitraryArgs(cmd *Command, args []string) error {
+ return nil
+}
+
+// MinimumNArgs returns an error if there are not at least N args.
+func MinimumNArgs(n int) PositionalArgs {
+ return func(cmd *Command, args []string) error {
+ if len(args) < n {
+ return fmt.Errorf("requires at least %d arg(s), only received %d", n, len(args))
+ }
+ return nil
+ }
+}
+
+// MaximumNArgs returns an error if there are more than N args.
+func MaximumNArgs(n int) PositionalArgs {
+ return func(cmd *Command, args []string) error {
+ if len(args) > n {
+ return fmt.Errorf("accepts at most %d arg(s), received %d", n, len(args))
+ }
+ return nil
+ }
+}
+
+// ExactArgs returns an error if there are not exactly n args.
+func ExactArgs(n int) PositionalArgs {
+ return func(cmd *Command, args []string) error {
+ if len(args) != n {
+ return fmt.Errorf("accepts %d arg(s), received %d", n, len(args))
+ }
+ return nil
+ }
+}
+
+// RangeArgs returns an error if the number of args is not within the expected range.
+func RangeArgs(min int, max int) PositionalArgs {
+ return func(cmd *Command, args []string) error {
+ if len(args) < min || len(args) > max {
+ return fmt.Errorf("accepts between %d and %d arg(s), received %d", min, max, len(args))
+ }
+ return nil
+ }
+}
+
+// MatchAll allows combining several PositionalArgs to work in concert.
+func MatchAll(pargs ...PositionalArgs) PositionalArgs {
+ return func(cmd *Command, args []string) error {
+ for _, parg := range pargs {
+ if err := parg(cmd, args); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+}
+
+// ExactValidArgs returns an error if there are not exactly N positional args OR
+// there are any positional args that are not in the `ValidArgs` field of `Command`
+//
+// Deprecated: use MatchAll(ExactArgs(n), OnlyValidArgs) instead
+func ExactValidArgs(n int) PositionalArgs {
+ return MatchAll(ExactArgs(n), OnlyValidArgs)
+}
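
A minimal sketch combining the validators above (the `pick` command and its `ValidArgs` are hypothetical):

```go
package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{
		Use:       "pick <color>",
		ValidArgs: []string{"red", "green", "blue"},
		// Accept exactly one argument, and only from ValidArgs.
		Args: cobra.MatchAll(cobra.ExactArgs(1), cobra.OnlyValidArgs),
		RunE: func(cmd *cobra.Command, args []string) error {
			fmt.Println("picked", args[0])
			return nil
		},
	}
	if err := cmd.Execute(); err != nil {
		os.Exit(1)
	}
}
```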
diff --git a/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/spf13/cobra/bash_completions.go
new file mode 100644
index 000000000..f4d198cbc
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/bash_completions.go
@@ -0,0 +1,709 @@
+// Copyright 2013-2023 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cobra
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "sort"
+ "strings"
+
+ "github.com/spf13/pflag"
+)
+
+// Annotations for Bash completion.
+const (
+ BashCompFilenameExt = "cobra_annotation_bash_completion_filename_extensions"
+ BashCompCustom = "cobra_annotation_bash_completion_custom"
+ BashCompOneRequiredFlag = "cobra_annotation_bash_completion_one_required_flag"
+ BashCompSubdirsInDir = "cobra_annotation_bash_completion_subdirs_in_dir"
+)
+
+func writePreamble(buf io.StringWriter, name string) {
+ WriteStringAndCheck(buf, fmt.Sprintf("# bash completion for %-36s -*- shell-script -*-\n", name))
+ WriteStringAndCheck(buf, fmt.Sprintf(`
+__%[1]s_debug()
+{
+ if [[ -n ${BASH_COMP_DEBUG_FILE:-} ]]; then
+ echo "$*" >> "${BASH_COMP_DEBUG_FILE}"
+ fi
+}
+
+# Homebrew on Macs has version 1.3 of bash-completion which doesn't include
+# _init_completion. This is a very minimal version of that function.
+__%[1]s_init_completion()
+{
+ COMPREPLY=()
+ _get_comp_words_by_ref "$@" cur prev words cword
+}
+
+__%[1]s_index_of_word()
+{
+ local w word=$1
+ shift
+ index=0
+ for w in "$@"; do
+ [[ $w = "$word" ]] && return
+ index=$((index+1))
+ done
+ index=-1
+}
+
+__%[1]s_contains_word()
+{
+ local w word=$1; shift
+ for w in "$@"; do
+ [[ $w = "$word" ]] && return
+ done
+ return 1
+}
+
+__%[1]s_handle_go_custom_completion()
+{
+ __%[1]s_debug "${FUNCNAME[0]}: cur is ${cur}, words[*] is ${words[*]}, #words[@] is ${#words[@]}"
+
+ local shellCompDirectiveError=%[3]d
+ local shellCompDirectiveNoSpace=%[4]d
+ local shellCompDirectiveNoFileComp=%[5]d
+ local shellCompDirectiveFilterFileExt=%[6]d
+ local shellCompDirectiveFilterDirs=%[7]d
+
+ local out requestComp lastParam lastChar comp directive args
+
+ # Prepare the command to request completions for the program.
+ # Calling ${words[0]} instead of directly %[1]s allows handling aliases
+ args=("${words[@]:1}")
+ # Disable ActiveHelp which is not supported for bash completion v1
+ requestComp="%[8]s=0 ${words[0]} %[2]s ${args[*]}"
+
+ lastParam=${words[$((${#words[@]}-1))]}
+ lastChar=${lastParam:$((${#lastParam}-1)):1}
+ __%[1]s_debug "${FUNCNAME[0]}: lastParam ${lastParam}, lastChar ${lastChar}"
+
+ if [ -z "${cur}" ] && [ "${lastChar}" != "=" ]; then
+ # If the last parameter is complete (there is a space following it)
+ # We add an extra empty parameter so we can indicate this to the go method.
+ __%[1]s_debug "${FUNCNAME[0]}: Adding extra empty parameter"
+ requestComp="${requestComp} \"\""
+ fi
+
+ __%[1]s_debug "${FUNCNAME[0]}: calling ${requestComp}"
+ # Use eval to handle any environment variables and such
+ out=$(eval "${requestComp}" 2>/dev/null)
+
+ # Extract the directive integer at the very end of the output following a colon (:)
+ directive=${out##*:}
+ # Remove the directive
+ out=${out%%:*}
+ if [ "${directive}" = "${out}" ]; then
+ # There is no directive specified
+ directive=0
+ fi
+ __%[1]s_debug "${FUNCNAME[0]}: the completion directive is: ${directive}"
+ __%[1]s_debug "${FUNCNAME[0]}: the completions are: ${out}"
+
+ if [ $((directive & shellCompDirectiveError)) -ne 0 ]; then
+ # Error code. No completion.
+ __%[1]s_debug "${FUNCNAME[0]}: received error from custom completion go code"
+ return
+ else
+ if [ $((directive & shellCompDirectiveNoSpace)) -ne 0 ]; then
+ if [[ $(type -t compopt) = "builtin" ]]; then
+ __%[1]s_debug "${FUNCNAME[0]}: activating no space"
+ compopt -o nospace
+ fi
+ fi
+ if [ $((directive & shellCompDirectiveNoFileComp)) -ne 0 ]; then
+ if [[ $(type -t compopt) = "builtin" ]]; then
+ __%[1]s_debug "${FUNCNAME[0]}: activating no file completion"
+ compopt +o default
+ fi
+ fi
+ fi
+
+ if [ $((directive & shellCompDirectiveFilterFileExt)) -ne 0 ]; then
+ # File extension filtering
+ local fullFilter filter filteringCmd
+ # Do not use quotes around the $out variable or else newline
+ # characters will be kept.
+ for filter in ${out}; do
+ fullFilter+="$filter|"
+ done
+
+ filteringCmd="_filedir $fullFilter"
+ __%[1]s_debug "File filtering command: $filteringCmd"
+ $filteringCmd
+ elif [ $((directive & shellCompDirectiveFilterDirs)) -ne 0 ]; then
+ # File completion for directories only
+ local subdir
+ # Use printf to strip any trailing newline
+ subdir=$(printf "%%s" "${out}")
+ if [ -n "$subdir" ]; then
+ __%[1]s_debug "Listing directories in $subdir"
+ __%[1]s_handle_subdirs_in_dir_flag "$subdir"
+ else
+ __%[1]s_debug "Listing directories in ."
+ _filedir -d
+ fi
+ else
+ while IFS='' read -r comp; do
+ COMPREPLY+=("$comp")
+ done < <(compgen -W "${out}" -- "$cur")
+ fi
+}
+
+__%[1]s_handle_reply()
+{
+ __%[1]s_debug "${FUNCNAME[0]}"
+ local comp
+ case $cur in
+ -*)
+ if [[ $(type -t compopt) = "builtin" ]]; then
+ compopt -o nospace
+ fi
+ local allflags
+ if [ ${#must_have_one_flag[@]} -ne 0 ]; then
+ allflags=("${must_have_one_flag[@]}")
+ else
+ allflags=("${flags[*]} ${two_word_flags[*]}")
+ fi
+ while IFS='' read -r comp; do
+ COMPREPLY+=("$comp")
+ done < <(compgen -W "${allflags[*]}" -- "$cur")
+ if [[ $(type -t compopt) = "builtin" ]]; then
+ [[ "${COMPREPLY[0]}" == *= ]] || compopt +o nospace
+ fi
+
+ # complete after --flag=abc
+ if [[ $cur == *=* ]]; then
+ if [[ $(type -t compopt) = "builtin" ]]; then
+ compopt +o nospace
+ fi
+
+ local index flag
+ flag="${cur%%=*}"
+ __%[1]s_index_of_word "${flag}" "${flags_with_completion[@]}"
+ COMPREPLY=()
+ if [[ ${index} -ge 0 ]]; then
+ PREFIX=""
+ cur="${cur#*=}"
+ ${flags_completion[${index}]}
+ if [ -n "${ZSH_VERSION:-}" ]; then
+ # zsh completion needs --flag= prefix
+ eval "COMPREPLY=( \"\${COMPREPLY[@]/#/${flag}=}\" )"
+ fi
+ fi
+ fi
+
+ if [[ -z "${flag_parsing_disabled}" ]]; then
+ # If flag parsing is enabled, we have completed the flags and can return.
+ # If flag parsing is disabled, we may not know all (or any) of the flags, so we fallthrough
+ # to possibly call handle_go_custom_completion.
+ return 0;
+ fi
+ ;;
+ esac
+
+ # check if we are handling a flag with special work handling
+ local index
+ __%[1]s_index_of_word "${prev}" "${flags_with_completion[@]}"
+ if [[ ${index} -ge 0 ]]; then
+ ${flags_completion[${index}]}
+ return
+ fi
+
+ # we are parsing a flag and don't have a special handler, no completion
+ if [[ ${cur} != "${words[cword]}" ]]; then
+ return
+ fi
+
+ local completions
+ completions=("${commands[@]}")
+ if [[ ${#must_have_one_noun[@]} -ne 0 ]]; then
+ completions+=("${must_have_one_noun[@]}")
+ elif [[ -n "${has_completion_function}" ]]; then
+ # if a go completion function is provided, defer to that function
+ __%[1]s_handle_go_custom_completion
+ fi
+ if [[ ${#must_have_one_flag[@]} -ne 0 ]]; then
+ completions+=("${must_have_one_flag[@]}")
+ fi
+ while IFS='' read -r comp; do
+ COMPREPLY+=("$comp")
+ done < <(compgen -W "${completions[*]}" -- "$cur")
+
+ if [[ ${#COMPREPLY[@]} -eq 0 && ${#noun_aliases[@]} -gt 0 && ${#must_have_one_noun[@]} -ne 0 ]]; then
+ while IFS='' read -r comp; do
+ COMPREPLY+=("$comp")
+ done < <(compgen -W "${noun_aliases[*]}" -- "$cur")
+ fi
+
+ if [[ ${#COMPREPLY[@]} -eq 0 ]]; then
+ if declare -F __%[1]s_custom_func >/dev/null; then
+ # try command name qualified custom func
+ __%[1]s_custom_func
+ else
+ # otherwise fall back to unqualified for compatibility
+ declare -F __custom_func >/dev/null && __custom_func
+ fi
+ fi
+
+ # available in bash-completion >= 2, not always present on macOS
+ if declare -F __ltrim_colon_completions >/dev/null; then
+ __ltrim_colon_completions "$cur"
+ fi
+
+ # If there is only 1 completion and it is a flag with an = it will be completed
+ # but we don't want a space after the =
+ if [[ "${#COMPREPLY[@]}" -eq "1" ]] && [[ $(type -t compopt) = "builtin" ]] && [[ "${COMPREPLY[0]}" == --*= ]]; then
+ compopt -o nospace
+ fi
+}
+
+# The arguments should be in the form "ext1|ext2|extn"
+__%[1]s_handle_filename_extension_flag()
+{
+ local ext="$1"
+ _filedir "@(${ext})"
+}
+
+__%[1]s_handle_subdirs_in_dir_flag()
+{
+ local dir="$1"
+ pushd "${dir}" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1 || return
+}
+
+__%[1]s_handle_flag()
+{
+ __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
+
+ # if a command required a flag, and we found it, unset must_have_one_flag()
+ local flagname=${words[c]}
+ local flagvalue=""
+ # if the word contained an =
+ if [[ ${words[c]} == *"="* ]]; then
+ flagvalue=${flagname#*=} # take in as flagvalue after the =
+ flagname=${flagname%%=*} # strip everything after the =
+ flagname="${flagname}=" # but put the = back
+ fi
+ __%[1]s_debug "${FUNCNAME[0]}: looking for ${flagname}"
+ if __%[1]s_contains_word "${flagname}" "${must_have_one_flag[@]}"; then
+ must_have_one_flag=()
+ fi
+
+ # if you set a flag which only applies to this command, don't show subcommands
+ if __%[1]s_contains_word "${flagname}" "${local_nonpersistent_flags[@]}"; then
+ commands=()
+ fi
+
+ # keep flag value with flagname as flaghash
+ # flaghash variable is an associative array which is only supported in bash > 3.
+ if [[ -z "${BASH_VERSION:-}" || "${BASH_VERSINFO[0]:-}" -gt 3 ]]; then
+ if [ -n "${flagvalue}" ] ; then
+ flaghash[${flagname}]=${flagvalue}
+ elif [ -n "${words[ $((c+1)) ]}" ] ; then
+ flaghash[${flagname}]=${words[ $((c+1)) ]}
+ else
+ flaghash[${flagname}]="true" # pad "true" for bool flag
+ fi
+ fi
+
+ # skip the argument to a two word flag
+ if [[ ${words[c]} != *"="* ]] && __%[1]s_contains_word "${words[c]}" "${two_word_flags[@]}"; then
+ __%[1]s_debug "${FUNCNAME[0]}: found a flag ${words[c]}, skip the next argument"
+ c=$((c+1))
+ # if we are looking for a flags value, don't show commands
+ if [[ $c -eq $cword ]]; then
+ commands=()
+ fi
+ fi
+
+ c=$((c+1))
+
+}
+
+__%[1]s_handle_noun()
+{
+ __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
+
+ if __%[1]s_contains_word "${words[c]}" "${must_have_one_noun[@]}"; then
+ must_have_one_noun=()
+ elif __%[1]s_contains_word "${words[c]}" "${noun_aliases[@]}"; then
+ must_have_one_noun=()
+ fi
+
+ nouns+=("${words[c]}")
+ c=$((c+1))
+}
+
+__%[1]s_handle_command()
+{
+ __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
+
+ local next_command
+ if [[ -n ${last_command} ]]; then
+ next_command="_${last_command}_${words[c]//:/__}"
+ else
+ if [[ $c -eq 0 ]]; then
+ next_command="_%[1]s_root_command"
+ else
+ next_command="_${words[c]//:/__}"
+ fi
+ fi
+ c=$((c+1))
+ __%[1]s_debug "${FUNCNAME[0]}: looking for ${next_command}"
+ declare -F "$next_command" >/dev/null && $next_command
+}
+
+__%[1]s_handle_word()
+{
+ if [[ $c -ge $cword ]]; then
+ __%[1]s_handle_reply
+ return
+ fi
+ __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
+ if [[ "${words[c]}" == -* ]]; then
+ __%[1]s_handle_flag
+ elif __%[1]s_contains_word "${words[c]}" "${commands[@]}"; then
+ __%[1]s_handle_command
+ elif [[ $c -eq 0 ]]; then
+ __%[1]s_handle_command
+ elif __%[1]s_contains_word "${words[c]}" "${command_aliases[@]}"; then
+ # aliashash variable is an associative array which is only supported in bash > 3.
+ if [[ -z "${BASH_VERSION:-}" || "${BASH_VERSINFO[0]:-}" -gt 3 ]]; then
+ words[c]=${aliashash[${words[c]}]}
+ __%[1]s_handle_command
+ else
+ __%[1]s_handle_noun
+ fi
+ else
+ __%[1]s_handle_noun
+ fi
+ __%[1]s_handle_word
+}
+
+`, name, ShellCompNoDescRequestCmd,
+ ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp,
+ ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, activeHelpEnvVar(name)))
+}
+
+func writePostscript(buf io.StringWriter, name string) {
+ name = strings.ReplaceAll(name, ":", "__")
+ WriteStringAndCheck(buf, fmt.Sprintf("__start_%s()\n", name))
+ WriteStringAndCheck(buf, fmt.Sprintf(`{
+ local cur prev words cword split
+ declare -A flaghash 2>/dev/null || :
+ declare -A aliashash 2>/dev/null || :
+ if declare -F _init_completion >/dev/null 2>&1; then
+ _init_completion -s || return
+ else
+ __%[1]s_init_completion -n "=" || return
+ fi
+
+ local c=0
+ local flag_parsing_disabled=
+ local flags=()
+ local two_word_flags=()
+ local local_nonpersistent_flags=()
+ local flags_with_completion=()
+ local flags_completion=()
+ local commands=("%[1]s")
+ local command_aliases=()
+ local must_have_one_flag=()
+ local must_have_one_noun=()
+ local has_completion_function=""
+ local last_command=""
+ local nouns=()
+ local noun_aliases=()
+
+ __%[1]s_handle_word
+}
+
+`, name))
+ WriteStringAndCheck(buf, fmt.Sprintf(`if [[ $(type -t compopt) = "builtin" ]]; then
+ complete -o default -F __start_%s %s
+else
+ complete -o default -o nospace -F __start_%s %s
+fi
+
+`, name, name, name, name))
+ WriteStringAndCheck(buf, "# ex: ts=4 sw=4 et filetype=sh\n")
+}
+
+func writeCommands(buf io.StringWriter, cmd *Command) {
+ WriteStringAndCheck(buf, " commands=()\n")
+ for _, c := range cmd.Commands() {
+ if !c.IsAvailableCommand() && c != cmd.helpCommand {
+ continue
+ }
+ WriteStringAndCheck(buf, fmt.Sprintf(" commands+=(%q)\n", c.Name()))
+ writeCmdAliases(buf, c)
+ }
+ WriteStringAndCheck(buf, "\n")
+}
+
+func writeFlagHandler(buf io.StringWriter, name string, annotations map[string][]string, cmd *Command) {
+ for key, value := range annotations {
+ switch key {
+ case BashCompFilenameExt:
+ WriteStringAndCheck(buf, fmt.Sprintf(" flags_with_completion+=(%q)\n", name))
+
+ var ext string
+ if len(value) > 0 {
+ ext = fmt.Sprintf("__%s_handle_filename_extension_flag ", cmd.Root().Name()) + strings.Join(value, "|")
+ } else {
+ ext = "_filedir"
+ }
+ WriteStringAndCheck(buf, fmt.Sprintf(" flags_completion+=(%q)\n", ext))
+ case BashCompCustom:
+ WriteStringAndCheck(buf, fmt.Sprintf(" flags_with_completion+=(%q)\n", name))
+
+ if len(value) > 0 {
+ handlers := strings.Join(value, "; ")
+ WriteStringAndCheck(buf, fmt.Sprintf(" flags_completion+=(%q)\n", handlers))
+ } else {
+ WriteStringAndCheck(buf, " flags_completion+=(:)\n")
+ }
+ case BashCompSubdirsInDir:
+ WriteStringAndCheck(buf, fmt.Sprintf(" flags_with_completion+=(%q)\n", name))
+
+ var ext string
+ if len(value) == 1 {
+ ext = fmt.Sprintf("__%s_handle_subdirs_in_dir_flag ", cmd.Root().Name()) + value[0]
+ } else {
+ ext = "_filedir -d"
+ }
+ WriteStringAndCheck(buf, fmt.Sprintf(" flags_completion+=(%q)\n", ext))
+ }
+ }
+}
+
+const cbn = "\")\n"
+
+func writeShortFlag(buf io.StringWriter, flag *pflag.Flag, cmd *Command) {
+ name := flag.Shorthand
+ format := " "
+ if len(flag.NoOptDefVal) == 0 {
+ format += "two_word_"
+ }
+ format += "flags+=(\"-%s" + cbn
+ WriteStringAndCheck(buf, fmt.Sprintf(format, name))
+ writeFlagHandler(buf, "-"+name, flag.Annotations, cmd)
+}
+
+func writeFlag(buf io.StringWriter, flag *pflag.Flag, cmd *Command) {
+ name := flag.Name
+ format := " flags+=(\"--%s"
+ if len(flag.NoOptDefVal) == 0 {
+ format += "="
+ }
+ format += cbn
+ WriteStringAndCheck(buf, fmt.Sprintf(format, name))
+ if len(flag.NoOptDefVal) == 0 {
+ format = " two_word_flags+=(\"--%s" + cbn
+ WriteStringAndCheck(buf, fmt.Sprintf(format, name))
+ }
+ writeFlagHandler(buf, "--"+name, flag.Annotations, cmd)
+}
+
+func writeLocalNonPersistentFlag(buf io.StringWriter, flag *pflag.Flag) {
+ name := flag.Name
+ format := " local_nonpersistent_flags+=(\"--%[1]s" + cbn
+ if len(flag.NoOptDefVal) == 0 {
+ format += " local_nonpersistent_flags+=(\"--%[1]s=" + cbn
+ }
+ WriteStringAndCheck(buf, fmt.Sprintf(format, name))
+ if len(flag.Shorthand) > 0 {
+ WriteStringAndCheck(buf, fmt.Sprintf(" local_nonpersistent_flags+=(\"-%s\")\n", flag.Shorthand))
+ }
+}
+
+// prepareCustomAnnotationsForFlags sets up annotations for go completions for registered flags
+func prepareCustomAnnotationsForFlags(cmd *Command) {
+ flagCompletionMutex.RLock()
+ defer flagCompletionMutex.RUnlock()
+ for flag := range flagCompletionFunctions {
+ // Make sure the completion script calls the __*_go_custom_completion function for
+ // every registered flag. We need to do this here (and not when the flag was registered
+ // for completion) so that we can know the root command name for the prefix
+ // of __<program>_go_custom_completion
+ if flag.Annotations == nil {
+ flag.Annotations = map[string][]string{}
+ }
+ flag.Annotations[BashCompCustom] = []string{fmt.Sprintf("__%[1]s_handle_go_custom_completion", cmd.Root().Name())}
+ }
+}
+
+func writeFlags(buf io.StringWriter, cmd *Command) {
+ prepareCustomAnnotationsForFlags(cmd)
+ WriteStringAndCheck(buf, ` flags=()
+ two_word_flags=()
+ local_nonpersistent_flags=()
+ flags_with_completion=()
+ flags_completion=()
+
+`)
+
+ if cmd.DisableFlagParsing {
+ WriteStringAndCheck(buf, " flag_parsing_disabled=1\n")
+ }
+
+ localNonPersistentFlags := cmd.LocalNonPersistentFlags()
+ cmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) {
+ if nonCompletableFlag(flag) {
+ return
+ }
+ writeFlag(buf, flag, cmd)
+ if len(flag.Shorthand) > 0 {
+ writeShortFlag(buf, flag, cmd)
+ }
+ // localNonPersistentFlags are used to stop the completion of subcommands when one is set
+ // if TraverseChildren is true we should allow completing subcommands
+ if localNonPersistentFlags.Lookup(flag.Name) != nil && !cmd.Root().TraverseChildren {
+ writeLocalNonPersistentFlag(buf, flag)
+ }
+ })
+ cmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) {
+ if nonCompletableFlag(flag) {
+ return
+ }
+ writeFlag(buf, flag, cmd)
+ if len(flag.Shorthand) > 0 {
+ writeShortFlag(buf, flag, cmd)
+ }
+ })
+
+ WriteStringAndCheck(buf, "\n")
+}
+
+func writeRequiredFlag(buf io.StringWriter, cmd *Command) {
+ WriteStringAndCheck(buf, " must_have_one_flag=()\n")
+ flags := cmd.NonInheritedFlags()
+ flags.VisitAll(func(flag *pflag.Flag) {
+ if nonCompletableFlag(flag) {
+ return
+ }
+ if _, ok := flag.Annotations[BashCompOneRequiredFlag]; ok {
+ format := " must_have_one_flag+=(\"--%s"
+ if flag.Value.Type() != "bool" {
+ format += "="
+ }
+ format += cbn
+ WriteStringAndCheck(buf, fmt.Sprintf(format, flag.Name))
+
+ if len(flag.Shorthand) > 0 {
+ WriteStringAndCheck(buf, fmt.Sprintf(" must_have_one_flag+=(\"-%s"+cbn, flag.Shorthand))
+ }
+ }
+ })
+}
+
+func writeRequiredNouns(buf io.StringWriter, cmd *Command) {
+ WriteStringAndCheck(buf, " must_have_one_noun=()\n")
+ sort.Strings(cmd.ValidArgs)
+ for _, value := range cmd.ValidArgs {
+ // Remove any description that may be included following a tab character.
+ // Descriptions are not supported by bash completion.
+ value = strings.SplitN(value, "\t", 2)[0]
+ WriteStringAndCheck(buf, fmt.Sprintf(" must_have_one_noun+=(%q)\n", value))
+ }
+ if cmd.ValidArgsFunction != nil {
+ WriteStringAndCheck(buf, " has_completion_function=1\n")
+ }
+}
+
+func writeCmdAliases(buf io.StringWriter, cmd *Command) {
+ if len(cmd.Aliases) == 0 {
+ return
+ }
+
+ sort.Strings(cmd.Aliases)
+
+ WriteStringAndCheck(buf, fmt.Sprint(` if [[ -z "${BASH_VERSION:-}" || "${BASH_VERSINFO[0]:-}" -gt 3 ]]; then`, "\n"))
+ for _, value := range cmd.Aliases {
+ WriteStringAndCheck(buf, fmt.Sprintf(" command_aliases+=(%q)\n", value))
+ WriteStringAndCheck(buf, fmt.Sprintf(" aliashash[%q]=%q\n", value, cmd.Name()))
+ }
+ WriteStringAndCheck(buf, ` fi`)
+ WriteStringAndCheck(buf, "\n")
+}
+func writeArgAliases(buf io.StringWriter, cmd *Command) {
+ WriteStringAndCheck(buf, " noun_aliases=()\n")
+ sort.Strings(cmd.ArgAliases)
+ for _, value := range cmd.ArgAliases {
+ WriteStringAndCheck(buf, fmt.Sprintf(" noun_aliases+=(%q)\n", value))
+ }
+}
+
+func gen(buf io.StringWriter, cmd *Command) {
+ for _, c := range cmd.Commands() {
+ if !c.IsAvailableCommand() && c != cmd.helpCommand {
+ continue
+ }
+ gen(buf, c)
+ }
+ commandName := cmd.CommandPath()
+ commandName = strings.ReplaceAll(commandName, " ", "_")
+ commandName = strings.ReplaceAll(commandName, ":", "__")
+
+ if cmd.Root() == cmd {
+ WriteStringAndCheck(buf, fmt.Sprintf("_%s_root_command()\n{\n", commandName))
+ } else {
+ WriteStringAndCheck(buf, fmt.Sprintf("_%s()\n{\n", commandName))
+ }
+
+ WriteStringAndCheck(buf, fmt.Sprintf(" last_command=%q\n", commandName))
+ WriteStringAndCheck(buf, "\n")
+ WriteStringAndCheck(buf, " command_aliases=()\n")
+ WriteStringAndCheck(buf, "\n")
+
+ writeCommands(buf, cmd)
+ writeFlags(buf, cmd)
+ writeRequiredFlag(buf, cmd)
+ writeRequiredNouns(buf, cmd)
+ writeArgAliases(buf, cmd)
+ WriteStringAndCheck(buf, "}\n\n")
+}
+
+// GenBashCompletion generates bash completion file and writes to the passed writer.
+func (c *Command) GenBashCompletion(w io.Writer) error {
+ buf := new(bytes.Buffer)
+ writePreamble(buf, c.Name())
+ if len(c.BashCompletionFunction) > 0 {
+ buf.WriteString(c.BashCompletionFunction + "\n")
+ }
+ gen(buf, c)
+ writePostscript(buf, c.Name())
+
+ _, err := buf.WriteTo(w)
+ return err
+}
+
+func nonCompletableFlag(flag *pflag.Flag) bool {
+ return flag.Hidden || len(flag.Deprecated) > 0
+}
+
+// GenBashCompletionFile generates bash completion file.
+func (c *Command) GenBashCompletionFile(filename string) error {
+ outFile, err := os.Create(filename)
+ if err != nil {
+ return err
+ }
+ defer outFile.Close()
+
+ return c.GenBashCompletion(outFile)
+}
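
A short sketch of generating this v1 bash completion script from a program (the `app` command and the output path are illustrative):

```go
package main

import (
	"log"
	"os"

	"github.com/spf13/cobra"
)

func main() {
	rootCmd := &cobra.Command{Use: "app"}

	// Write a v1 bash completion script to a file...
	if err := rootCmd.GenBashCompletionFile("/tmp/app-completion.bash"); err != nil {
		log.Fatal(err)
	}
	// ...or stream the same script to stdout instead.
	if err := rootCmd.GenBashCompletion(os.Stdout); err != nil {
		log.Fatal(err)
	}
}
```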
diff --git a/vendor/github.com/spf13/cobra/bash_completionsV2.go b/vendor/github.com/spf13/cobra/bash_completionsV2.go
new file mode 100644
index 000000000..1cce5c329
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/bash_completionsV2.go
@@ -0,0 +1,396 @@
+// Copyright 2013-2023 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cobra
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+)
+
+func (c *Command) genBashCompletion(w io.Writer, includeDesc bool) error {
+ buf := new(bytes.Buffer)
+ genBashComp(buf, c.Name(), includeDesc)
+ _, err := buf.WriteTo(w)
+ return err
+}
+
+func genBashComp(buf io.StringWriter, name string, includeDesc bool) {
+ compCmd := ShellCompRequestCmd
+ if !includeDesc {
+ compCmd = ShellCompNoDescRequestCmd
+ }
+
+ WriteStringAndCheck(buf, fmt.Sprintf(`# bash completion V2 for %-36[1]s -*- shell-script -*-
+
+__%[1]s_debug()
+{
+ if [[ -n ${BASH_COMP_DEBUG_FILE-} ]]; then
+ echo "$*" >> "${BASH_COMP_DEBUG_FILE}"
+ fi
+}
+
+# Macs have bash3 for which the bash-completion package doesn't include
+# _init_completion. This is a minimal version of that function.
+__%[1]s_init_completion()
+{
+ COMPREPLY=()
+ _get_comp_words_by_ref "$@" cur prev words cword
+}
+
+# This function calls the %[1]s program to obtain the completion
+# results and the directive. It fills the 'out' and 'directive' vars.
+__%[1]s_get_completion_results() {
+ local requestComp lastParam lastChar args
+
+ # Prepare the command to request completions for the program.
+ # Calling ${words[0]} instead of directly %[1]s allows handling aliases
+ args=("${words[@]:1}")
+ requestComp="${words[0]} %[2]s ${args[*]}"
+
+ lastParam=${words[$((${#words[@]}-1))]}
+ lastChar=${lastParam:$((${#lastParam}-1)):1}
+ __%[1]s_debug "lastParam ${lastParam}, lastChar ${lastChar}"
+
+ if [[ -z ${cur} && ${lastChar} != = ]]; then
+ # If the last parameter is complete (there is a space following it),
+ # we add an extra empty parameter so we can indicate this to the go method.
+ __%[1]s_debug "Adding extra empty parameter"
+ requestComp="${requestComp} ''"
+ fi
+
+ # When completing a flag with an = (e.g., %[1]s -n=)
+ # bash focuses on the part after the =, so we need to remove
+ # the flag part from $cur
+ if [[ ${cur} == -*=* ]]; then
+ cur="${cur#*=}"
+ fi
+
+ __%[1]s_debug "Calling ${requestComp}"
+ # Use eval to handle any environment variables and such
+ out=$(eval "${requestComp}" 2>/dev/null)
+
+ # Extract the directive integer at the very end of the output following a colon (:)
+ directive=${out##*:}
+ # Remove the directive
+ out=${out%%:*}
+ if [[ ${directive} == "${out}" ]]; then
+ # There is no directive specified
+ directive=0
+ fi
+ __%[1]s_debug "The completion directive is: ${directive}"
+ __%[1]s_debug "The completions are: ${out}"
+}
+
+__%[1]s_process_completion_results() {
+ local shellCompDirectiveError=%[3]d
+ local shellCompDirectiveNoSpace=%[4]d
+ local shellCompDirectiveNoFileComp=%[5]d
+ local shellCompDirectiveFilterFileExt=%[6]d
+ local shellCompDirectiveFilterDirs=%[7]d
+ local shellCompDirectiveKeepOrder=%[8]d
+
+ if (((directive & shellCompDirectiveError) != 0)); then
+ # Error code. No completion.
+ __%[1]s_debug "Received error from custom completion go code"
+ return
+ else
+ if (((directive & shellCompDirectiveNoSpace) != 0)); then
+ if [[ $(type -t compopt) == builtin ]]; then
+ __%[1]s_debug "Activating no space"
+ compopt -o nospace
+ else
+ __%[1]s_debug "No space directive not supported in this version of bash"
+ fi
+ fi
+ if (((directive & shellCompDirectiveKeepOrder) != 0)); then
+ if [[ $(type -t compopt) == builtin ]]; then
+ # nosort isn't supported for bash versions older than 4.4
+ if [[ ${BASH_VERSINFO[0]} -lt 4 || ( ${BASH_VERSINFO[0]} -eq 4 && ${BASH_VERSINFO[1]} -lt 4 ) ]]; then
+ __%[1]s_debug "No sort directive not supported in this version of bash"
+ else
+ __%[1]s_debug "Activating keep order"
+ compopt -o nosort
+ fi
+ else
+ __%[1]s_debug "No sort directive not supported in this version of bash"
+ fi
+ fi
+ if (((directive & shellCompDirectiveNoFileComp) != 0)); then
+ if [[ $(type -t compopt) == builtin ]]; then
+ __%[1]s_debug "Activating no file completion"
+ compopt +o default
+ else
+ __%[1]s_debug "No file completion directive not supported in this version of bash"
+ fi
+ fi
+ fi
+
+ # Separate activeHelp from normal completions
+ local completions=()
+ local activeHelp=()
+ __%[1]s_extract_activeHelp
+
+ if (((directive & shellCompDirectiveFilterFileExt) != 0)); then
+ # File extension filtering
+ local fullFilter filter filteringCmd
+
+ # Do not use quotes around the $completions variable or else newline
+ # characters will be kept.
+ for filter in ${completions[*]}; do
+ fullFilter+="$filter|"
+ done
+
+ filteringCmd="_filedir $fullFilter"
+ __%[1]s_debug "File filtering command: $filteringCmd"
+ $filteringCmd
+ elif (((directive & shellCompDirectiveFilterDirs) != 0)); then
+ # File completion for directories only
+
+ local subdir
+ subdir=${completions[0]}
+ if [[ -n $subdir ]]; then
+ __%[1]s_debug "Listing directories in $subdir"
+ pushd "$subdir" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1 || return
+ else
+ __%[1]s_debug "Listing directories in ."
+ _filedir -d
+ fi
+ else
+ __%[1]s_handle_completion_types
+ fi
+
+ __%[1]s_handle_special_char "$cur" :
+ __%[1]s_handle_special_char "$cur" =
+
+ # Print the activeHelp statements before we finish
+ if ((${#activeHelp[*]} != 0)); then
+ printf "\n";
+ printf "%%s\n" "${activeHelp[@]}"
+ printf "\n"
+
+ # The prompt format is only available from bash 4.4.
+ # We test if it is available before using it.
+ if (x=${PS1@P}) 2> /dev/null; then
+ printf "%%s" "${PS1@P}${COMP_LINE[@]}"
+ else
+ # Can't print the prompt. Just print the
+ # text the user had typed, it is workable enough.
+ printf "%%s" "${COMP_LINE[@]}"
+ fi
+ fi
+}
+
+# Separate activeHelp lines from real completions.
+# Fills the $activeHelp and $completions arrays.
+__%[1]s_extract_activeHelp() {
+ local activeHelpMarker="%[9]s"
+ local endIndex=${#activeHelpMarker}
+
+ while IFS='' read -r comp; do
+ if [[ ${comp:0:endIndex} == $activeHelpMarker ]]; then
+ comp=${comp:endIndex}
+ __%[1]s_debug "ActiveHelp found: $comp"
+ if [[ -n $comp ]]; then
+ activeHelp+=("$comp")
+ fi
+ else
+ # Not an activeHelp line but a normal completion
+ completions+=("$comp")
+ fi
+ done <<<"${out}"
+}
+
+__%[1]s_handle_completion_types() {
+ __%[1]s_debug "__%[1]s_handle_completion_types: COMP_TYPE is $COMP_TYPE"
+
+ case $COMP_TYPE in
+ 37|42)
+ # Type: menu-complete/menu-complete-backward and insert-completions
+ # If the user requested inserting one completion at a time, or all
+ # completions at once on the command-line we must remove the descriptions.
+ # https://github.com/spf13/cobra/issues/1508
+ local tab=$'\t' comp
+ while IFS='' read -r comp; do
+ [[ -z $comp ]] && continue
+ # Strip any description
+ comp=${comp%%%%$tab*}
+ # Only consider the completions that match
+ if [[ $comp == "$cur"* ]]; then
+ COMPREPLY+=("$comp")
+ fi
+ done < <(printf "%%s\n" "${completions[@]}")
+ ;;
+
+ *)
+ # Type: complete (normal completion)
+ __%[1]s_handle_standard_completion_case
+ ;;
+ esac
+}
+
+__%[1]s_handle_standard_completion_case() {
+ local tab=$'\t' comp
+
+ # Short circuit to optimize if we don't have descriptions
+ if [[ "${completions[*]}" != *$tab* ]]; then
+ IFS=$'\n' read -ra COMPREPLY -d '' < <(compgen -W "${completions[*]}" -- "$cur")
+ return 0
+ fi
+
+ local longest=0
+ local compline
+ # Look for the longest completion so that we can format things nicely
+ while IFS='' read -r compline; do
+ [[ -z $compline ]] && continue
+ # Strip any description before checking the length
+ comp=${compline%%%%$tab*}
+ # Only consider the completions that match
+ [[ $comp == "$cur"* ]] || continue
+ COMPREPLY+=("$compline")
+ if ((${#comp}>longest)); then
+ longest=${#comp}
+ fi
+ done < <(printf "%%s\n" "${completions[@]}")
+
+ # If there is a single completion left, remove the description text
+ if ((${#COMPREPLY[*]} == 1)); then
+ __%[1]s_debug "COMPREPLY[0]: ${COMPREPLY[0]}"
+ comp="${COMPREPLY[0]%%%%$tab*}"
+ __%[1]s_debug "Removed description from single completion, which is now: ${comp}"
+ COMPREPLY[0]=$comp
+ else # Format the descriptions
+ __%[1]s_format_comp_descriptions $longest
+ fi
+}
+
+__%[1]s_handle_special_char()
+{
+ local comp="$1"
+ local char=$2
+ if [[ "$comp" == *${char}* && "$COMP_WORDBREAKS" == *${char}* ]]; then
+ local word=${comp%%"${comp##*${char}}"}
+ local idx=${#COMPREPLY[*]}
+ while ((--idx >= 0)); do
+ COMPREPLY[idx]=${COMPREPLY[idx]#"$word"}
+ done
+ fi
+}
+
+__%[1]s_format_comp_descriptions()
+{
+ local tab=$'\t'
+ local comp desc maxdesclength
+ local longest=$1
+
+ local i ci
+ for ci in ${!COMPREPLY[*]}; do
+ comp=${COMPREPLY[ci]}
+ # Properly format the description string which follows a tab character if there is one
+ if [[ "$comp" == *$tab* ]]; then
+ __%[1]s_debug "Original comp: $comp"
+ desc=${comp#*$tab}
+ comp=${comp%%%%$tab*}
+
+ # $COLUMNS stores the current shell width.
+ # Remove an extra 4 because we add 2 spaces and 2 parentheses.
+ maxdesclength=$(( COLUMNS - longest - 4 ))
+
+ # Make sure we can fit a description of at least 8 characters
+ # if we are to align the descriptions.
+ if ((maxdesclength > 8)); then
+ # Add the proper number of spaces to align the descriptions
+ for ((i = ${#comp} ; i < longest ; i++)); do
+ comp+=" "
+ done
+ else
+ # Don't pad the descriptions so we can fit more text after the completion
+ maxdesclength=$(( COLUMNS - ${#comp} - 4 ))
+ fi
+
+ # If there is enough space for any description text,
+ # truncate the descriptions that are too long for the shell width
+ if ((maxdesclength > 0)); then
+ if ((${#desc} > maxdesclength)); then
+ desc=${desc:0:$(( maxdesclength - 1 ))}
+ desc+="…"
+ fi
+ comp+=" ($desc)"
+ fi
+ COMPREPLY[ci]=$comp
+ __%[1]s_debug "Final comp: $comp"
+ fi
+ done
+}
+
+__start_%[1]s()
+{
+ local cur prev words cword split
+
+ COMPREPLY=()
+
+ # Call _init_completion from the bash-completion package
+ # to prepare the arguments properly
+ if declare -F _init_completion >/dev/null 2>&1; then
+ _init_completion -n =: || return
+ else
+ __%[1]s_init_completion -n =: || return
+ fi
+
+ __%[1]s_debug
+ __%[1]s_debug "========= starting completion logic =========="
+ __%[1]s_debug "cur is ${cur}, words[*] is ${words[*]}, #words[@] is ${#words[@]}, cword is $cword"
+
+ # The user could have moved the cursor backwards on the command-line.
+ # We need to trigger completion from the $cword location, so we need
+ # to truncate the command-line ($words) up to the $cword location.
+ words=("${words[@]:0:$cword+1}")
+ __%[1]s_debug "Truncated words[*]: ${words[*]},"
+
+ local out directive
+ __%[1]s_get_completion_results
+ __%[1]s_process_completion_results
+}
+
+if [[ $(type -t compopt) = "builtin" ]]; then
+ complete -o default -F __start_%[1]s %[1]s
+else
+ complete -o default -o nospace -F __start_%[1]s %[1]s
+fi
+
+# ex: ts=4 sw=4 et filetype=sh
+`, name, compCmd,
+ ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp,
+ ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, ShellCompDirectiveKeepOrder,
+ activeHelpMarker))
+}
+
+// GenBashCompletionFileV2 generates Bash completion version 2.
+func (c *Command) GenBashCompletionFileV2(filename string, includeDesc bool) error {
+ outFile, err := os.Create(filename)
+ if err != nil {
+ return err
+ }
+ defer outFile.Close()
+
+ return c.GenBashCompletionV2(outFile, includeDesc)
+}
+
+// GenBashCompletionV2 generates Bash completion file version 2
+// and writes it to the passed writer.
+func (c *Command) GenBashCompletionV2(w io.Writer, includeDesc bool) error {
+ return c.genBashCompletion(w, includeDesc)
+}
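+
+// Illustrative sketch (not part of the upstream cobra sources): a hypothetical
+// CLI could emit the V2 script with completion descriptions enabled like so:
+//
+//	if err := rootCmd.GenBashCompletionV2(os.Stdout, true); err != nil {
+//		log.Fatal(err)
+//	}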
diff --git a/vendor/github.com/spf13/cobra/cobra.go b/vendor/github.com/spf13/cobra/cobra.go
new file mode 100644
index 000000000..e0b0947b0
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/cobra.go
@@ -0,0 +1,242 @@
+// Copyright 2013-2023 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Commands similar to git, go tools, and other modern CLI tools,
+// inspired by go, go-Commander, gh, and subcommand.
+
+package cobra
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+ "strconv"
+ "strings"
+ "text/template"
+ "time"
+ "unicode"
+)
+
+var templateFuncs = template.FuncMap{
+ "trim": strings.TrimSpace,
+ "trimRightSpace": trimRightSpace,
+ "trimTrailingWhitespaces": trimRightSpace,
+ "appendIfNotPresent": appendIfNotPresent,
+ "rpad": rpad,
+ "gt": Gt,
+ "eq": Eq,
+}
+
+var initializers []func()
+var finalizers []func()
+
+const (
+ defaultPrefixMatching = false
+ defaultCommandSorting = true
+ defaultCaseInsensitive = false
+ defaultTraverseRunHooks = false
+)
+
+// EnablePrefixMatching allows setting automatic prefix matching, which can be
+// a dangerous thing to enable in CLI tools.
+// Set this to true to enable it.
+var EnablePrefixMatching = defaultPrefixMatching
+
+// EnableCommandSorting controls sorting of the slice of commands, which is turned on by default.
+// To disable sorting, set it to false.
+var EnableCommandSorting = defaultCommandSorting
+
+// EnableCaseInsensitive allows case-insensitive command names. (Case sensitive by default.)
+var EnableCaseInsensitive = defaultCaseInsensitive
+
+// EnableTraverseRunHooks executes persistent pre-run and post-run hooks from all parents.
+// By default this is disabled, which means only the first run hook to be found is executed.
+var EnableTraverseRunHooks = defaultTraverseRunHooks
+
+// MousetrapHelpText enables an information splash screen on Windows
+// if the CLI is started from explorer.exe.
+// To disable the mousetrap, just set this variable to blank string ("").
+// Works only on Microsoft Windows.
+var MousetrapHelpText = `This is a command line tool.
+
+You need to open cmd.exe and run it from there.
+`
+
+// MousetrapDisplayDuration controls how long the MousetrapHelpText message is displayed on Windows
+// if the CLI is started from explorer.exe. Set to 0 to wait for the return key to be pressed.
+// To disable the mousetrap, just set MousetrapHelpText to blank string ("").
+// Works only on Microsoft Windows.
+var MousetrapDisplayDuration = 5 * time.Second
+
+// AddTemplateFunc adds a template function that's available to Usage and Help
+// template generation.
+func AddTemplateFunc(name string, tmplFunc interface{}) {
+ templateFuncs[name] = tmplFunc
+}
+
+// AddTemplateFuncs adds multiple template functions that are available to Usage and
+// Help template generation.
+func AddTemplateFuncs(tmplFuncs template.FuncMap) {
+ for k, v := range tmplFuncs {
+ templateFuncs[k] = v
+ }
+}
+
+// OnInitialize sets the passed functions to be run when each command's
+// Execute method is called.
+func OnInitialize(y ...func()) {
+ initializers = append(initializers, y...)
+}
+
+// OnFinalize sets the passed functions to be run when each command's
+// Execute method is terminated.
+func OnFinalize(y ...func()) {
+ finalizers = append(finalizers, y...)
+}
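+
+// Illustrative sketch (not part of the upstream cobra sources): a hypothetical
+// program could hook configuration loading and cleanup like so:
+//
+//	cobra.OnInitialize(func() { fmt.Println("loading config") })
+//	cobra.OnFinalize(func() { fmt.Println("cleaning up") })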
+
+// FIXME Gt is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra.
+
+// Gt takes two types and checks whether the first type is greater than the second. In case of types Arrays, Chans,
+// Maps and Slices, Gt will compare their lengths. Ints are compared directly while strings are first parsed as
+// ints and then compared.
+func Gt(a interface{}, b interface{}) bool {
+ var left, right int64
+ av := reflect.ValueOf(a)
+
+ switch av.Kind() {
+ case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
+ left = int64(av.Len())
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ left = av.Int()
+ case reflect.String:
+ left, _ = strconv.ParseInt(av.String(), 10, 64)
+ }
+
+ bv := reflect.ValueOf(b)
+
+ switch bv.Kind() {
+ case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
+ right = int64(bv.Len())
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ right = bv.Int()
+ case reflect.String:
+ right, _ = strconv.ParseInt(bv.String(), 10, 64)
+ }
+
+ return left > right
+}
+
+// FIXME Eq is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra.
+
+// Eq takes two types and checks whether they are equal. Supported types are int and string. Unsupported types will panic.
+func Eq(a interface{}, b interface{}) bool {
+ av := reflect.ValueOf(a)
+ bv := reflect.ValueOf(b)
+
+ switch av.Kind() {
+ case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
+ panic("Eq called on unsupported type")
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return av.Int() == bv.Int()
+ case reflect.String:
+ return av.String() == bv.String()
+ }
+ return false
+}
+
+func trimRightSpace(s string) string {
+ return strings.TrimRightFunc(s, unicode.IsSpace)
+}
+
+// FIXME appendIfNotPresent is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra.
+
+// appendIfNotPresent will append stringToAppend to the end of s, but only if it's not yet present in s.
+func appendIfNotPresent(s, stringToAppend string) string {
+ if strings.Contains(s, stringToAppend) {
+ return s
+ }
+ return s + " " + stringToAppend
+}
+
+// rpad adds padding to the right of a string.
+func rpad(s string, padding int) string {
+ formattedString := fmt.Sprintf("%%-%ds", padding)
+ return fmt.Sprintf(formattedString, s)
+}
+
+// tmpl executes the given template text on data, writing the result to w.
+func tmpl(w io.Writer, text string, data interface{}) error {
+ t := template.New("top")
+ t.Funcs(templateFuncs)
+ template.Must(t.Parse(text))
+ return t.Execute(w, data)
+}
+
+// ld compares two strings and returns the levenshtein distance between them.
+func ld(s, t string, ignoreCase bool) int {
+ if ignoreCase {
+ s = strings.ToLower(s)
+ t = strings.ToLower(t)
+ }
+ d := make([][]int, len(s)+1)
+ for i := range d {
+ d[i] = make([]int, len(t)+1)
+ d[i][0] = i
+ }
+ for j := range d[0] {
+ d[0][j] = j
+ }
+ for j := 1; j <= len(t); j++ {
+ for i := 1; i <= len(s); i++ {
+ if s[i-1] == t[j-1] {
+ d[i][j] = d[i-1][j-1]
+ } else {
+ min := d[i-1][j]
+ if d[i][j-1] < min {
+ min = d[i][j-1]
+ }
+ if d[i-1][j-1] < min {
+ min = d[i-1][j-1]
+ }
+ d[i][j] = min + 1
+ }
+ }
+
+ }
+ return d[len(s)][len(t)]
+}
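+
+// Worked example (illustrative, not part of the upstream sources): for the
+// classic pair "kitten" and "sitting", ld returns 3 (substitute k->s,
+// substitute e->i, append g).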
+
+func stringInSlice(a string, list []string) bool {
+ for _, b := range list {
+ if b == a {
+ return true
+ }
+ }
+ return false
+}
+
+// CheckErr prints the msg with the prefix 'Error:' and exits with error code 1. If the msg is nil, it does nothing.
+func CheckErr(msg interface{}) {
+ if msg != nil {
+ fmt.Fprintln(os.Stderr, "Error:", msg)
+ os.Exit(1)
+ }
+}
+
+// WriteStringAndCheck writes a string into a buffer, and checks if the error is not nil.
+func WriteStringAndCheck(b io.StringWriter, s string) {
+ _, err := b.WriteString(s)
+ CheckErr(err)
+}
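+
+// Illustrative sketch (not part of the upstream cobra sources): CheckErr is
+// commonly used at the top level of a hypothetical main function, e.g.:
+//
+//	func main() {
+//		cobra.CheckErr(rootCmd.Execute())
+//	}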
diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go
new file mode 100644
index 000000000..54748fc67
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/command.go
@@ -0,0 +1,1896 @@
+// Copyright 2013-2023 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package cobra is a commander providing a simple interface to create powerful modern CLI interfaces.
+// In addition to providing an interface, Cobra simultaneously provides a controller to organize your application code.
+package cobra
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+
+ flag "github.com/spf13/pflag"
+)
+
+const (
+ FlagSetByCobraAnnotation = "cobra_annotation_flag_set_by_cobra"
+ CommandDisplayNameAnnotation = "cobra_annotation_command_display_name"
+)
+
+// FParseErrWhitelist configures Flag parse errors to be ignored
+type FParseErrWhitelist flag.ParseErrorsWhitelist
+
+// Group Structure to manage groups for commands
+type Group struct {
+ ID string
+ Title string
+}
+
+// Command is just that, a command for your application.
+// E.g. 'go run ...' - 'run' is the command. Cobra requires
+// you to define the usage and description as part of your command
+// definition to ensure usability.
+type Command struct {
+ // Use is the one-line usage message.
+ // Recommended syntax is as follows:
+ // [ ] identifies an optional argument. Arguments that are not enclosed in brackets are required.
+ // ... indicates that you can specify multiple values for the previous argument.
+ // | indicates mutually exclusive information. You can use the argument to the left of the separator or the
+ // argument to the right of the separator. You cannot use both arguments in a single use of the command.
+ // { } delimits a set of mutually exclusive arguments when one of the arguments is required. If the arguments are
+ // optional, they are enclosed in brackets ([ ]).
+ // Example: add [-F file | -D dir]... [-f format] profile
+ Use string
+
+ // Aliases is an array of aliases that can be used instead of the first word in Use.
+ Aliases []string
+
+ // SuggestFor is an array of command names for which this command will be suggested -
+ // similar to aliases but only suggests.
+ SuggestFor []string
+
+ // Short is the short description shown in the 'help' output.
+ Short string
+
+ // The group id under which this subcommand is grouped in the 'help' output of its parent.
+ GroupID string
+
+ // Long is the long message shown in the 'help <this-command>' output.
+ Long string
+
+ // Example is examples of how to use the command.
+ Example string
+
+ // ValidArgs is a list of all valid non-flag arguments that are accepted in shell completions.
+ ValidArgs []string
+ // ValidArgsFunction is an optional function that provides valid non-flag arguments for shell completion.
+ // It is a dynamic version of using ValidArgs.
+ // Only one of ValidArgs and ValidArgsFunction can be used for a command.
+ ValidArgsFunction func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective)
+
+ // Expected arguments
+ Args PositionalArgs
+
+ // ArgAliases is a list of aliases for ValidArgs.
+ // These are not suggested to the user in the shell completion,
+ // but accepted if entered manually.
+ ArgAliases []string
+
+ // BashCompletionFunction is custom bash code used by the legacy bash autocompletion generator.
+ // For portability with other shells, it is recommended to instead use ValidArgsFunction.
+ BashCompletionFunction string
+
+ // Deprecated marks this command as deprecated; if non-empty, this string is printed when the command is used.
+ Deprecated string
+
+ // Annotations are key/value pairs that can be used by applications to identify or
+ // group commands or set special options.
+ Annotations map[string]string
+
+ // Version defines the version for this command. If this value is non-empty and the command does not
+ // define a "version" flag, a "version" boolean flag will be added to the command and, if specified,
+ // will print content of the "Version" variable. A shorthand "v" flag will also be added if the
+ // command does not define one.
+ Version string
+
+ // The *Run functions are executed in the following order:
+ // * PersistentPreRun()
+ // * PreRun()
+ // * Run()
+ // * PostRun()
+ // * PersistentPostRun()
+ // All functions get the same args, the arguments after the command name.
+ // The *PreRun and *PostRun functions will only be executed if the Run function of the current
+ // command has been declared.
+ //
+ // PersistentPreRun: children of this command will inherit and execute.
+ PersistentPreRun func(cmd *Command, args []string)
+ // PersistentPreRunE: PersistentPreRun but returns an error.
+ PersistentPreRunE func(cmd *Command, args []string) error
+ // PreRun: children of this command will not inherit.
+ PreRun func(cmd *Command, args []string)
+ // PreRunE: PreRun but returns an error.
+ PreRunE func(cmd *Command, args []string) error
+ // Run: Typically the actual work function. Most commands will only implement this.
+ Run func(cmd *Command, args []string)
+ // RunE: Run but returns an error.
+ RunE func(cmd *Command, args []string) error
+ // PostRun: run after the Run command.
+ PostRun func(cmd *Command, args []string)
+ // PostRunE: PostRun but returns an error.
+ PostRunE func(cmd *Command, args []string) error
+ // PersistentPostRun: children of this command will inherit and execute after PostRun.
+ PersistentPostRun func(cmd *Command, args []string)
+ // PersistentPostRunE: PersistentPostRun but returns an error.
+ PersistentPostRunE func(cmd *Command, args []string) error
+
+ // groups for subcommands
+ commandgroups []*Group
+
+ // args is actual args parsed from flags.
+ args []string
+ // flagErrorBuf contains all error messages from pflag.
+ flagErrorBuf *bytes.Buffer
+ // flags is full set of flags.
+ flags *flag.FlagSet
+ // pflags contains persistent flags.
+ pflags *flag.FlagSet
+ // lflags contains local flags.
+ // This field does not represent internal state, it's used as a cache to optimise LocalFlags function call
+ lflags *flag.FlagSet
+ // iflags contains inherited flags.
+ // This field does not represent internal state, it's used as a cache to optimise InheritedFlags function call
+ iflags *flag.FlagSet
+ // parentsPflags is all persistent flags of cmd's parents.
+ parentsPflags *flag.FlagSet
+ // globNormFunc is the global normalization function
+ // that we can use on every pflag set and child commands
+ globNormFunc func(f *flag.FlagSet, name string) flag.NormalizedName
+
+ // usageFunc is usage func defined by user.
+ usageFunc func(*Command) error
+ // usageTemplate is usage template defined by user.
+ usageTemplate string
+ // flagErrorFunc is func defined by user and it's called when the parsing of
+ // flags returns an error.
+ flagErrorFunc func(*Command, error) error
+ // helpTemplate is help template defined by user.
+ helpTemplate string
+ // helpFunc is help func defined by user.
+ helpFunc func(*Command, []string)
+ // helpCommand is command with usage 'help'. If it's not defined by user,
+ // cobra uses default help command.
+ helpCommand *Command
+ // helpCommandGroupID is the group id for the helpCommand
+ helpCommandGroupID string
+
+ // completionCommandGroupID is the group id for the completion command
+ completionCommandGroupID string
+
+ // versionTemplate is the version template defined by user.
+ versionTemplate string
+
+ // errPrefix is the error message prefix defined by user.
+ errPrefix string
+
+ // inReader is a reader defined by the user that replaces stdin
+ inReader io.Reader
+ // outWriter is a writer defined by the user that replaces stdout
+ outWriter io.Writer
+ // errWriter is a writer defined by the user that replaces stderr
+ errWriter io.Writer
+
+ // FParseErrWhitelist configures flag parse errors to be ignored.
+ FParseErrWhitelist FParseErrWhitelist
+
+ // CompletionOptions is a set of options to control the handling of shell completion
+ CompletionOptions CompletionOptions
+
+ // commandsAreSorted defines whether the commands slice is sorted.
+ commandsAreSorted bool
+ // commandCalledAs is the name or alias value used to call this command.
+ commandCalledAs struct {
+ name string
+ called bool
+ }
+
+ ctx context.Context
+
+ // commands is the list of commands supported by this program.
+ commands []*Command
+ // parent is a parent command for this command.
+ parent *Command
+ // Maximum lengths of the commands' strings, used for padding.
+ commandsMaxUseLen int
+ commandsMaxCommandPathLen int
+ commandsMaxNameLen int
+
+ // TraverseChildren parses flags on all parents before executing child command.
+ TraverseChildren bool
+
+ // Hidden defines whether this command is hidden and should NOT show up in the list of available commands.
+ Hidden bool
+
+ // SilenceErrors is an option to quiet errors down stream.
+ SilenceErrors bool
+
+ // SilenceUsage is an option to silence usage when an error occurs.
+ SilenceUsage bool
+
+ // DisableFlagParsing disables the flag parsing.
+ // If this is true all flags will be passed to the command as arguments.
+ DisableFlagParsing bool
+
+ // DisableAutoGenTag defines whether the gen tag ("Auto generated by spf13/cobra...")
+ // will be printed when generating docs for this command.
+ DisableAutoGenTag bool
+
+ // DisableFlagsInUseLine will disable the addition of [flags] to the usage
+ // line of a command when printing help or generating docs
+ DisableFlagsInUseLine bool
+
+ // DisableSuggestions disables the suggestions based on Levenshtein distance
+ // that go along with 'unknown command' messages.
+ DisableSuggestions bool
+
+ // SuggestionsMinimumDistance defines minimum levenshtein distance to display suggestions.
+ // Must be > 0.
+ SuggestionsMinimumDistance int
+}
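+
+// Illustrative sketch (not part of the upstream cobra sources): a minimal
+// runnable command for a hypothetical "greet" tool could be defined like so:
+//
+//	var greetCmd = &cobra.Command{
+//		Use:   "greet [name]",
+//		Short: "Print a greeting",
+//		Args:  cobra.MaximumNArgs(1),
+//		RunE: func(cmd *cobra.Command, args []string) error {
+//			name := "world"
+//			if len(args) == 1 {
+//				name = args[0]
+//			}
+//			_, err := fmt.Fprintf(cmd.OutOrStdout(), "Hello, %s!\n", name)
+//			return err
+//		},
+//	}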
+
+// Context returns underlying command context. If command was executed
+// with ExecuteContext or the context was set with SetContext, the
+// previously set context will be returned. Otherwise, nil is returned.
+//
+// Notice that a call to Execute and ExecuteC will replace a nil context of
+// a command with a context.Background, so a background context will be
+// returned by Context after one of these functions has been called.
+func (c *Command) Context() context.Context {
+ return c.ctx
+}
+
+// SetContext sets context for the command. This context will be overwritten by
+// Command.ExecuteContext or Command.ExecuteContextC.
+func (c *Command) SetContext(ctx context.Context) {
+ c.ctx = ctx
+}
+
+// SetArgs sets arguments for the command. By default the arguments are taken from os.Args[1:];
+// overriding them is particularly useful when testing.
+func (c *Command) SetArgs(a []string) {
+ c.args = a
+}
+
+// SetOutput sets the destination for usage and error messages.
+// If output is nil, os.Stderr is used.
+// Deprecated: Use SetOut and/or SetErr instead
+func (c *Command) SetOutput(output io.Writer) {
+ c.outWriter = output
+ c.errWriter = output
+}
+
+// SetOut sets the destination for usage messages.
+// If newOut is nil, os.Stdout is used.
+func (c *Command) SetOut(newOut io.Writer) {
+ c.outWriter = newOut
+}
+
+// SetErr sets the destination for error messages.
+// If newErr is nil, os.Stderr is used.
+func (c *Command) SetErr(newErr io.Writer) {
+ c.errWriter = newErr
+}
+
+// SetIn sets the source for input data
+// If newIn is nil, os.Stdin is used.
+func (c *Command) SetIn(newIn io.Reader) {
+ c.inReader = newIn
+}
+
+// SetUsageFunc sets usage function. Usage can be defined by application.
+func (c *Command) SetUsageFunc(f func(*Command) error) {
+ c.usageFunc = f
+}
+
+// SetUsageTemplate sets usage template. Can be defined by Application.
+func (c *Command) SetUsageTemplate(s string) {
+ c.usageTemplate = s
+}
+
+// SetFlagErrorFunc sets a function to generate an error when flag parsing
+// fails.
+func (c *Command) SetFlagErrorFunc(f func(*Command, error) error) {
+ c.flagErrorFunc = f
+}
+
+// SetHelpFunc sets help function. Can be defined by Application.
+func (c *Command) SetHelpFunc(f func(*Command, []string)) {
+ c.helpFunc = f
+}
+
+// SetHelpCommand sets help command.
+func (c *Command) SetHelpCommand(cmd *Command) {
+ c.helpCommand = cmd
+}
+
+// SetHelpCommandGroupID sets the group id of the help command.
+func (c *Command) SetHelpCommandGroupID(groupID string) {
+ if c.helpCommand != nil {
+ c.helpCommand.GroupID = groupID
+ }
+ // helpCommandGroupID is used if no helpCommand is defined by the user
+ c.helpCommandGroupID = groupID
+}
+
+// SetCompletionCommandGroupID sets the group id of the completion command.
+func (c *Command) SetCompletionCommandGroupID(groupID string) {
+ // completionCommandGroupID is used if no completion command is defined by the user
+ c.Root().completionCommandGroupID = groupID
+}
+
+// SetHelpTemplate sets help template to be used. Application can use it to set custom template.
+func (c *Command) SetHelpTemplate(s string) {
+ c.helpTemplate = s
+}
+
+// SetVersionTemplate sets version template to be used. Application can use it to set custom template.
+func (c *Command) SetVersionTemplate(s string) {
+ c.versionTemplate = s
+}
+
+// SetErrPrefix sets error message prefix to be used. Application can use it to set custom prefix.
+func (c *Command) SetErrPrefix(s string) {
+ c.errPrefix = s
+}
+
+// SetGlobalNormalizationFunc sets a normalization function to all flag sets and also to child commands.
+// The user should not have a cyclic dependency on commands.
+func (c *Command) SetGlobalNormalizationFunc(n func(f *flag.FlagSet, name string) flag.NormalizedName) {
+ c.Flags().SetNormalizeFunc(n)
+ c.PersistentFlags().SetNormalizeFunc(n)
+ c.globNormFunc = n
+
+ for _, command := range c.commands {
+ command.SetGlobalNormalizationFunc(n)
+ }
+}
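+
+// Illustrative sketch (not part of the upstream cobra sources): a hypothetical
+// normalization function letting users type "--my_flag" for "--my-flag",
+// assuming the pflag and strings packages are imported:
+//
+//	rootCmd.SetGlobalNormalizationFunc(func(f *pflag.FlagSet, name string) pflag.NormalizedName {
+//		return pflag.NormalizedName(strings.ReplaceAll(name, "_", "-"))
+//	})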
+
+// OutOrStdout returns output to stdout.
+func (c *Command) OutOrStdout() io.Writer {
+ return c.getOut(os.Stdout)
+}
+
+// OutOrStderr returns output to stderr
+func (c *Command) OutOrStderr() io.Writer {
+ return c.getOut(os.Stderr)
+}
+
+// ErrOrStderr returns output to stderr
+func (c *Command) ErrOrStderr() io.Writer {
+ return c.getErr(os.Stderr)
+}
+
+// InOrStdin returns input to stdin
+func (c *Command) InOrStdin() io.Reader {
+ return c.getIn(os.Stdin)
+}
+
+func (c *Command) getOut(def io.Writer) io.Writer {
+ if c.outWriter != nil {
+ return c.outWriter
+ }
+ if c.HasParent() {
+ return c.parent.getOut(def)
+ }
+ return def
+}
+
+func (c *Command) getErr(def io.Writer) io.Writer {
+ if c.errWriter != nil {
+ return c.errWriter
+ }
+ if c.HasParent() {
+ return c.parent.getErr(def)
+ }
+ return def
+}
+
+func (c *Command) getIn(def io.Reader) io.Reader {
+ if c.inReader != nil {
+ return c.inReader
+ }
+ if c.HasParent() {
+ return c.parent.getIn(def)
+ }
+ return def
+}
+
+// UsageFunc returns either the function set by SetUsageFunc for this command
+// or a parent, or it returns a default usage function.
+func (c *Command) UsageFunc() (f func(*Command) error) {
+ if c.usageFunc != nil {
+ return c.usageFunc
+ }
+ if c.HasParent() {
+ return c.Parent().UsageFunc()
+ }
+ return func(c *Command) error {
+ c.mergePersistentFlags()
+ err := tmpl(c.OutOrStderr(), c.UsageTemplate(), c)
+ if err != nil {
+ c.PrintErrln(err)
+ }
+ return err
+ }
+}
+
+// Usage puts out the usage for the command.
+// Used when a user provides invalid input.
+// Can be defined by user by overriding UsageFunc.
+func (c *Command) Usage() error {
+ return c.UsageFunc()(c)
+}
+
+// HelpFunc returns either the function set by SetHelpFunc for this command
+// or a parent, or it returns a function with default help behavior.
+func (c *Command) HelpFunc() func(*Command, []string) {
+ if c.helpFunc != nil {
+ return c.helpFunc
+ }
+ if c.HasParent() {
+ return c.Parent().HelpFunc()
+ }
+ return func(c *Command, a []string) {
+ c.mergePersistentFlags()
+ // The help should be sent to stdout
+ // See https://github.com/spf13/cobra/issues/1002
+ err := tmpl(c.OutOrStdout(), c.HelpTemplate(), c)
+ if err != nil {
+ c.PrintErrln(err)
+ }
+ }
+}
+
+// Help puts out the help for the command.
+// Used when a user calls help [command].
+// Can be defined by user by overriding HelpFunc.
+func (c *Command) Help() error {
+ c.HelpFunc()(c, []string{})
+ return nil
+}
+
+// UsageString returns usage string.
+func (c *Command) UsageString() string {
+ // Storing normal writers
+ tmpOutput := c.outWriter
+ tmpErr := c.errWriter
+
+ bb := new(bytes.Buffer)
+ c.outWriter = bb
+ c.errWriter = bb
+
+ CheckErr(c.Usage())
+
+ // Setting things back to normal
+ c.outWriter = tmpOutput
+ c.errWriter = tmpErr
+
+ return bb.String()
+}
+
+// FlagErrorFunc returns either the function set by SetFlagErrorFunc for this
+// command or a parent, or it returns a function which returns the original
+// error.
+func (c *Command) FlagErrorFunc() (f func(*Command, error) error) {
+ if c.flagErrorFunc != nil {
+ return c.flagErrorFunc
+ }
+
+ if c.HasParent() {
+ return c.parent.FlagErrorFunc()
+ }
+ return func(c *Command, err error) error {
+ return err
+ }
+}
+
+var minUsagePadding = 25
+
+// UsagePadding returns the padding for the usage.
+func (c *Command) UsagePadding() int {
+ if c.parent == nil || minUsagePadding > c.parent.commandsMaxUseLen {
+ return minUsagePadding
+ }
+ return c.parent.commandsMaxUseLen
+}
+
+var minCommandPathPadding = 11
+
+// CommandPathPadding returns the padding for the command path.
+func (c *Command) CommandPathPadding() int {
+ if c.parent == nil || minCommandPathPadding > c.parent.commandsMaxCommandPathLen {
+ return minCommandPathPadding
+ }
+ return c.parent.commandsMaxCommandPathLen
+}
+
+var minNamePadding = 11
+
+// NamePadding returns padding for the name.
+func (c *Command) NamePadding() int {
+ if c.parent == nil || minNamePadding > c.parent.commandsMaxNameLen {
+ return minNamePadding
+ }
+ return c.parent.commandsMaxNameLen
+}
+
+// UsageTemplate returns usage template for the command.
+func (c *Command) UsageTemplate() string {
+ if c.usageTemplate != "" {
+ return c.usageTemplate
+ }
+
+ if c.HasParent() {
+ return c.parent.UsageTemplate()
+ }
+ return `Usage:{{if .Runnable}}
+ {{.UseLine}}{{end}}{{if .HasAvailableSubCommands}}
+ {{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}}
+
+Aliases:
+ {{.NameAndAliases}}{{end}}{{if .HasExample}}
+
+Examples:
+{{.Example}}{{end}}{{if .HasAvailableSubCommands}}{{$cmds := .Commands}}{{if eq (len .Groups) 0}}
+
+Available Commands:{{range $cmds}}{{if (or .IsAvailableCommand (eq .Name "help"))}}
+ {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{else}}{{range $group := .Groups}}
+
+{{.Title}}{{range $cmds}}{{if (and (eq .GroupID $group.ID) (or .IsAvailableCommand (eq .Name "help")))}}
+ {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if not .AllChildCommandsHaveGroup}}
+
+Additional Commands:{{range $cmds}}{{if (and (eq .GroupID "") (or .IsAvailableCommand (eq .Name "help")))}}
+ {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}}
+
+Flags:
+{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasAvailableInheritedFlags}}
+
+Global Flags:
+{{.InheritedFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasHelpSubCommands}}
+
+Additional help topics:{{range .Commands}}{{if .IsAdditionalHelpTopicCommand}}
+ {{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableSubCommands}}
+
+Use "{{.CommandPath}} [command] --help" for more information about a command.{{end}}
+`
+}
+
+// HelpTemplate returns the help template for the command.
+func (c *Command) HelpTemplate() string {
+ if c.helpTemplate != "" {
+ return c.helpTemplate
+ }
+
+ if c.HasParent() {
+ return c.parent.HelpTemplate()
+ }
+ return `{{with (or .Long .Short)}}{{. | trimTrailingWhitespaces}}
+
+{{end}}{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}`
+}
+
+// VersionTemplate returns the version template for the command.
+func (c *Command) VersionTemplate() string {
+ if c.versionTemplate != "" {
+ return c.versionTemplate
+ }
+
+ if c.HasParent() {
+ return c.parent.VersionTemplate()
+ }
+ return `{{with .Name}}{{printf "%s " .}}{{end}}{{printf "version %s" .Version}}
+`
+}
+
+// ErrPrefix returns the error message prefix for the command.
+func (c *Command) ErrPrefix() string {
+ if c.errPrefix != "" {
+ return c.errPrefix
+ }
+
+ if c.HasParent() {
+ return c.parent.ErrPrefix()
+ }
+ return "Error:"
+}
+
+func hasNoOptDefVal(name string, fs *flag.FlagSet) bool {
+ flag := fs.Lookup(name)
+ if flag == nil {
+ return false
+ }
+ return flag.NoOptDefVal != ""
+}
+
+func shortHasNoOptDefVal(name string, fs *flag.FlagSet) bool {
+ if len(name) == 0 {
+ return false
+ }
+
+ flag := fs.ShorthandLookup(name[:1])
+ if flag == nil {
+ return false
+ }
+ return flag.NoOptDefVal != ""
+}
+
+func stripFlags(args []string, c *Command) []string {
+ if len(args) == 0 {
+ return args
+ }
+ c.mergePersistentFlags()
+
+ commands := []string{}
+ flags := c.Flags()
+
+Loop:
+ for len(args) > 0 {
+ s := args[0]
+ args = args[1:]
+ switch {
+ case s == "--":
+ // "--" terminates the flags
+ break Loop
+ case strings.HasPrefix(s, "--") && !strings.Contains(s, "=") && !hasNoOptDefVal(s[2:], flags):
+ // If '--flag arg' then
+ // delete arg from args.
+ fallthrough // (do the same as below)
+ case strings.HasPrefix(s, "-") && !strings.Contains(s, "=") && len(s) == 2 && !shortHasNoOptDefVal(s[1:], flags):
+ // If '-f arg' then
+ // delete 'arg' from args or break the loop if len(args) <= 1.
+ if len(args) <= 1 {
+ break Loop
+ } else {
+ args = args[1:]
+ continue
+ }
+ case s != "" && !strings.HasPrefix(s, "-"):
+ commands = append(commands, s)
+ }
+ }
+
+ return commands
+}
+
+// argsMinusFirstX removes only the first x from args. Otherwise, commands that look like
+// openshift admin policy add-role-to-user admin my-user would lose the admin argument (arg[4]).
+// Special care needs to be taken not to remove a flag value.
+func (c *Command) argsMinusFirstX(args []string, x string) []string {
+ if len(args) == 0 {
+ return args
+ }
+ c.mergePersistentFlags()
+ flags := c.Flags()
+
+Loop:
+ for pos := 0; pos < len(args); pos++ {
+ s := args[pos]
+ switch {
+ case s == "--":
+ // -- means we have reached the end of the parseable args. Break out of the loop now.
+ break Loop
+ case strings.HasPrefix(s, "--") && !strings.Contains(s, "=") && !hasNoOptDefVal(s[2:], flags):
+ fallthrough
+ case strings.HasPrefix(s, "-") && !strings.Contains(s, "=") && len(s) == 2 && !shortHasNoOptDefVal(s[1:], flags):
+ // This is a flag without a default value, and an equal sign is not used. Increment pos in order to skip
+ // over the next arg, because that is the value of this flag.
+ pos++
+ continue
+ case !strings.HasPrefix(s, "-"):
+ // This is not a flag or a flag value. Check to see if it matches what we're looking for, and if so,
+ // return the args, excluding the one at this position.
+ if s == x {
+ ret := make([]string, 0, len(args)-1)
+ ret = append(ret, args[:pos]...)
+ ret = append(ret, args[pos+1:]...)
+ return ret
+ }
+ }
+ }
+ return args
+}
+
+func isFlagArg(arg string) bool {
+ return ((len(arg) >= 3 && arg[0:2] == "--") ||
+ (len(arg) >= 2 && arg[0] == '-' && arg[1] != '-'))
+}
+
+// Find the target command given the args and command tree
+// Meant to be run on the highest node. Only searches down.
+func (c *Command) Find(args []string) (*Command, []string, error) {
+ var innerfind func(*Command, []string) (*Command, []string)
+
+ innerfind = func(c *Command, innerArgs []string) (*Command, []string) {
+ argsWOflags := stripFlags(innerArgs, c)
+ if len(argsWOflags) == 0 {
+ return c, innerArgs
+ }
+ nextSubCmd := argsWOflags[0]
+
+ cmd := c.findNext(nextSubCmd)
+ if cmd != nil {
+ return innerfind(cmd, c.argsMinusFirstX(innerArgs, nextSubCmd))
+ }
+ return c, innerArgs
+ }
+
+ commandFound, a := innerfind(c, args)
+ if commandFound.Args == nil {
+ return commandFound, a, legacyArgs(commandFound, stripFlags(a, commandFound))
+ }
+ return commandFound, a, nil
+}
+
+func (c *Command) findSuggestions(arg string) string {
+ if c.DisableSuggestions {
+ return ""
+ }
+ if c.SuggestionsMinimumDistance <= 0 {
+ c.SuggestionsMinimumDistance = 2
+ }
+ var sb strings.Builder
+ if suggestions := c.SuggestionsFor(arg); len(suggestions) > 0 {
+ sb.WriteString("\n\nDid you mean this?\n")
+ for _, s := range suggestions {
+ _, _ = fmt.Fprintf(&sb, "\t%v\n", s)
+ }
+ }
+ return sb.String()
+}
+
+func (c *Command) findNext(next string) *Command {
+ matches := make([]*Command, 0)
+ for _, cmd := range c.commands {
+ if commandNameMatches(cmd.Name(), next) || cmd.HasAlias(next) {
+ cmd.commandCalledAs.name = next
+ return cmd
+ }
+ if EnablePrefixMatching && cmd.hasNameOrAliasPrefix(next) {
+ matches = append(matches, cmd)
+ }
+ }
+
+ if len(matches) == 1 {
+ // Temporarily disable gosec G602, which produces a false positive.
+ // See https://github.com/securego/gosec/issues/1005.
+ return matches[0] // #nosec G602
+ }
+
+ return nil
+}
+
+// Traverse the command tree to find the command, and parse args for
+// each parent.
+func (c *Command) Traverse(args []string) (*Command, []string, error) {
+ flags := []string{}
+ inFlag := false
+
+ for i, arg := range args {
+ switch {
+ // A long flag with a space separated value
+ case strings.HasPrefix(arg, "--") && !strings.Contains(arg, "="):
+ // TODO: this isn't quite right, we should really check ahead for 'true' or 'false'
+ inFlag = !hasNoOptDefVal(arg[2:], c.Flags())
+ flags = append(flags, arg)
+ continue
+ // A short flag with a space separated value
+ case strings.HasPrefix(arg, "-") && !strings.Contains(arg, "=") && len(arg) == 2 && !shortHasNoOptDefVal(arg[1:], c.Flags()):
+ inFlag = true
+ flags = append(flags, arg)
+ continue
+ // The value for a flag
+ case inFlag:
+ inFlag = false
+ flags = append(flags, arg)
+ continue
+ // A flag without a value, or with an `=` separated value
+ case isFlagArg(arg):
+ flags = append(flags, arg)
+ continue
+ }
+
+ cmd := c.findNext(arg)
+ if cmd == nil {
+ return c, args, nil
+ }
+
+ if err := c.ParseFlags(flags); err != nil {
+ return nil, args, err
+ }
+ return cmd.Traverse(args[i+1:])
+ }
+ return c, args, nil
+}
+
+// SuggestionsFor provides suggestions for the typedName.
+func (c *Command) SuggestionsFor(typedName string) []string {
+ suggestions := []string{}
+ for _, cmd := range c.commands {
+ if cmd.IsAvailableCommand() {
+ levenshteinDistance := ld(typedName, cmd.Name(), true)
+ suggestByLevenshtein := levenshteinDistance <= c.SuggestionsMinimumDistance
+ suggestByPrefix := strings.HasPrefix(strings.ToLower(cmd.Name()), strings.ToLower(typedName))
+ if suggestByLevenshtein || suggestByPrefix {
+ suggestions = append(suggestions, cmd.Name())
+ }
+ for _, explicitSuggestion := range cmd.SuggestFor {
+ if strings.EqualFold(typedName, explicitSuggestion) {
+ suggestions = append(suggestions, cmd.Name())
+ }
+ }
+ }
+ }
+ return suggestions
+}
+
+// VisitParents visits all parents of the command and invokes fn on each parent.
+func (c *Command) VisitParents(fn func(*Command)) {
+ if c.HasParent() {
+ fn(c.Parent())
+ c.Parent().VisitParents(fn)
+ }
+}
+
+// Root finds root command.
+func (c *Command) Root() *Command {
+ if c.HasParent() {
+ return c.Parent().Root()
+ }
+ return c
+}
+
+// ArgsLenAtDash will return the length of c.Flags().Args at the moment
+// when a -- was found during args parsing.
+func (c *Command) ArgsLenAtDash() int {
+ return c.Flags().ArgsLenAtDash()
+}
+
+func (c *Command) execute(a []string) (err error) {
+ if c == nil {
+ return fmt.Errorf("called Execute() on a nil Command")
+ }
+
+ if len(c.Deprecated) > 0 {
+ c.Printf("Command %q is deprecated, %s\n", c.Name(), c.Deprecated)
+ }
+
+ // initialize help and version flag at the last point possible to allow for user
+ // overriding
+ c.InitDefaultHelpFlag()
+ c.InitDefaultVersionFlag()
+
+ err = c.ParseFlags(a)
+ if err != nil {
+ return c.FlagErrorFunc()(c, err)
+ }
+
+ // If help is called, regardless of other flags, return that we want help.
+ // Also say we need help if the command isn't runnable.
+ helpVal, err := c.Flags().GetBool("help")
+ if err != nil {
+ // should be impossible to get here as we always declare a help
+ // flag in InitDefaultHelpFlag()
+ c.Println("\"help\" flag declared as non-bool. Please correct your code")
+ return err
+ }
+
+ if helpVal {
+ return flag.ErrHelp
+ }
+
+ // for back-compat, only add version flag behavior if version is defined
+ if c.Version != "" {
+ versionVal, err := c.Flags().GetBool("version")
+ if err != nil {
+ c.Println("\"version\" flag declared as non-bool. Please correct your code")
+ return err
+ }
+ if versionVal {
+ err := tmpl(c.OutOrStdout(), c.VersionTemplate(), c)
+ if err != nil {
+ c.Println(err)
+ }
+ return err
+ }
+ }
+
+ if !c.Runnable() {
+ return flag.ErrHelp
+ }
+
+ c.preRun()
+
+ defer c.postRun()
+
+ argWoFlags := c.Flags().Args()
+ if c.DisableFlagParsing {
+ argWoFlags = a
+ }
+
+ if err := c.ValidateArgs(argWoFlags); err != nil {
+ return err
+ }
+
+ parents := make([]*Command, 0, 5)
+ for p := c; p != nil; p = p.Parent() {
+ if EnableTraverseRunHooks {
+ // When EnableTraverseRunHooks is set:
+ // - Execute all persistent pre-runs from the root parent till this command.
+ // - Execute all persistent post-runs from this command till the root parent.
+ parents = append([]*Command{p}, parents...)
+ } else {
+ // Otherwise, execute only the first found persistent hook.
+ parents = append(parents, p)
+ }
+ }
+ for _, p := range parents {
+ if p.PersistentPreRunE != nil {
+ if err := p.PersistentPreRunE(c, argWoFlags); err != nil {
+ return err
+ }
+ if !EnableTraverseRunHooks {
+ break
+ }
+ } else if p.PersistentPreRun != nil {
+ p.PersistentPreRun(c, argWoFlags)
+ if !EnableTraverseRunHooks {
+ break
+ }
+ }
+ }
+ if c.PreRunE != nil {
+ if err := c.PreRunE(c, argWoFlags); err != nil {
+ return err
+ }
+ } else if c.PreRun != nil {
+ c.PreRun(c, argWoFlags)
+ }
+
+ if err := c.ValidateRequiredFlags(); err != nil {
+ return err
+ }
+ if err := c.ValidateFlagGroups(); err != nil {
+ return err
+ }
+
+ if c.RunE != nil {
+ if err := c.RunE(c, argWoFlags); err != nil {
+ return err
+ }
+ } else {
+ c.Run(c, argWoFlags)
+ }
+ if c.PostRunE != nil {
+ if err := c.PostRunE(c, argWoFlags); err != nil {
+ return err
+ }
+ } else if c.PostRun != nil {
+ c.PostRun(c, argWoFlags)
+ }
+ for p := c; p != nil; p = p.Parent() {
+ if p.PersistentPostRunE != nil {
+ if err := p.PersistentPostRunE(c, argWoFlags); err != nil {
+ return err
+ }
+ if !EnableTraverseRunHooks {
+ break
+ }
+ } else if p.PersistentPostRun != nil {
+ p.PersistentPostRun(c, argWoFlags)
+ if !EnableTraverseRunHooks {
+ break
+ }
+ }
+ }
+
+ return nil
+}
+
+func (c *Command) preRun() {
+ for _, x := range initializers {
+ x()
+ }
+}
+
+func (c *Command) postRun() {
+ for _, x := range finalizers {
+ x()
+ }
+}
+
+// ExecuteContext is the same as Execute(), but sets the ctx on the command.
+// Retrieve ctx by calling cmd.Context() inside your *Run lifecycle or ValidArgs
+// functions.
+func (c *Command) ExecuteContext(ctx context.Context) error {
+ c.ctx = ctx
+ return c.Execute()
+}
+
+// Execute uses the args (os.Args[1:] by default)
+// and runs through the command tree, finding appropriate matches
+// for commands and then corresponding flags.
+func (c *Command) Execute() error {
+ _, err := c.ExecuteC()
+ return err
+}
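+
+// Illustrative sketch (not part of the upstream cobra sources): Execute is
+// typically invoked once, on the root command of a hypothetical application:
+//
+//	func main() {
+//		if err := rootCmd.Execute(); err != nil {
+//			os.Exit(1)
+//		}
+//	}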
+
+// ExecuteContextC is the same as ExecuteC(), but sets the ctx on the command.
+// Retrieve ctx by calling cmd.Context() inside your *Run lifecycle or ValidArgs
+// functions.
+func (c *Command) ExecuteContextC(ctx context.Context) (*Command, error) {
+ c.ctx = ctx
+ return c.ExecuteC()
+}
+
+// ExecuteC executes the command.
+func (c *Command) ExecuteC() (cmd *Command, err error) {
+ if c.ctx == nil {
+ c.ctx = context.Background()
+ }
+
+ // Regardless of what command execute is called on, run on Root only
+ if c.HasParent() {
+ return c.Root().ExecuteC()
+ }
+
+ // windows hook
+ if preExecHookFn != nil {
+ preExecHookFn(c)
+ }
+
+ // initialize help at the last point to allow for user overriding
+ c.InitDefaultHelpCmd()
+ // initialize completion at the last point to allow for user overriding
+ c.InitDefaultCompletionCmd()
+
+ // Now that all commands have been created, let's make sure all groups
+ // are properly created also
+ c.checkCommandGroups()
+
+ args := c.args
+
+ // Workaround FAIL with "go test -v" or "cobra.test -test.v", see #155
+ if c.args == nil && filepath.Base(os.Args[0]) != "cobra.test" {
+ args = os.Args[1:]
+ }
+
+ // initialize the hidden command to be used for shell completion
+ c.initCompleteCmd(args)
+
+ var flags []string
+ if c.TraverseChildren {
+ cmd, flags, err = c.Traverse(args)
+ } else {
+ cmd, flags, err = c.Find(args)
+ }
+ if err != nil {
+ // If we parsed down to a subcommand and then failed, talk about the subcommand
+ if cmd != nil {
+ c = cmd
+ }
+ if !c.SilenceErrors {
+ c.PrintErrln(c.ErrPrefix(), err.Error())
+ c.PrintErrf("Run '%v --help' for usage.\n", c.CommandPath())
+ }
+ return c, err
+ }
+
+ cmd.commandCalledAs.called = true
+ if cmd.commandCalledAs.name == "" {
+ cmd.commandCalledAs.name = cmd.Name()
+ }
+
+ // We have to pass the global context to child commands
+ // if the context is present on the parent command.
+ if cmd.ctx == nil {
+ cmd.ctx = c.ctx
+ }
+
+ err = cmd.execute(flags)
+ if err != nil {
+ // Always show help if requested, even if SilenceErrors is in
+ // effect
+ if errors.Is(err, flag.ErrHelp) {
+ cmd.HelpFunc()(cmd, args)
+ return cmd, nil
+ }
+
+ // If root command has SilenceErrors flagged,
+ // all subcommands should respect it
+ if !cmd.SilenceErrors && !c.SilenceErrors {
+ c.PrintErrln(cmd.ErrPrefix(), err.Error())
+ }
+
+ // If root command has SilenceUsage flagged,
+ // all subcommands should respect it
+ if !cmd.SilenceUsage && !c.SilenceUsage {
+ c.Println(cmd.UsageString())
+ }
+ }
+ return cmd, err
+}
+
+func (c *Command) ValidateArgs(args []string) error {
+ if c.Args == nil {
+ return ArbitraryArgs(c, args)
+ }
+ return c.Args(c, args)
+}
+
+// ValidateRequiredFlags validates that all required flags are present and returns an error otherwise.
+func (c *Command) ValidateRequiredFlags() error {
+ if c.DisableFlagParsing {
+ return nil
+ }
+
+ flags := c.Flags()
+ missingFlagNames := []string{}
+ flags.VisitAll(func(pflag *flag.Flag) {
+ requiredAnnotation, found := pflag.Annotations[BashCompOneRequiredFlag]
+ if !found {
+ return
+ }
+ if (requiredAnnotation[0] == "true") && !pflag.Changed {
+ missingFlagNames = append(missingFlagNames, pflag.Name)
+ }
+ })
+
+ if len(missingFlagNames) > 0 {
+ return fmt.Errorf(`required flag(s) "%s" not set`, strings.Join(missingFlagNames, `", "`))
+ }
+ return nil
+}
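+
+// Illustrative sketch (not part of the upstream cobra sources): a flag is made
+// required via MarkFlagRequired, which sets the BashCompOneRequiredFlag
+// annotation checked above:
+//
+//	cmd.Flags().String("config", "", "path to config file")
+//	if err := cmd.MarkFlagRequired("config"); err != nil {
+//		log.Fatal(err)
+//	}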
+
+// checkCommandGroups checks if a command has been added to a group that does not exist.
+// If so, we panic because it indicates a coding error that should be corrected.
+func (c *Command) checkCommandGroups() {
+ for _, sub := range c.commands {
+ // if Group is not defined let the developer know right away
+ if sub.GroupID != "" && !c.ContainsGroup(sub.GroupID) {
+ panic(fmt.Sprintf("group id '%s' is not defined for subcommand '%s'", sub.GroupID, sub.CommandPath()))
+ }
+
+ sub.checkCommandGroups()
+ }
+}
+
+// InitDefaultHelpFlag adds default help flag to c.
+// It is called automatically when executing c or when calling help and usage.
+// If c already has a help flag, it does nothing.
+func (c *Command) InitDefaultHelpFlag() {
+ c.mergePersistentFlags()
+ if c.Flags().Lookup("help") == nil {
+ usage := "help for "
+ name := c.displayName()
+ if name == "" {
+ usage += "this command"
+ } else {
+ usage += name
+ }
+ c.Flags().BoolP("help", "h", false, usage)
+ _ = c.Flags().SetAnnotation("help", FlagSetByCobraAnnotation, []string{"true"})
+ }
+}
+
+// InitDefaultVersionFlag adds default version flag to c.
+// It is called automatically when executing c.
+// If c already has a version flag, it will do nothing.
+// If c.Version is empty, it will do nothing.
+func (c *Command) InitDefaultVersionFlag() {
+ if c.Version == "" {
+ return
+ }
+
+ c.mergePersistentFlags()
+ if c.Flags().Lookup("version") == nil {
+ usage := "version for "
+ if c.Name() == "" {
+ usage += "this command"
+ } else {
+ usage += c.Name()
+ }
+ if c.Flags().ShorthandLookup("v") == nil {
+ c.Flags().BoolP("version", "v", false, usage)
+ } else {
+ c.Flags().Bool("version", false, usage)
+ }
+ _ = c.Flags().SetAnnotation("version", FlagSetByCobraAnnotation, []string{"true"})
+ }
+}
+
+// InitDefaultHelpCmd adds default help command to c.
+// It is called automatically when executing c or when calling help and usage.
+// If c already has a help command, or c has no subcommands, it does nothing.
+func (c *Command) InitDefaultHelpCmd() {
+ if !c.HasSubCommands() {
+ return
+ }
+
+ if c.helpCommand == nil {
+ c.helpCommand = &Command{
+ Use: "help [command]",
+ Short: "Help about any command",
+ Long: `Help provides help for any command in the application.
+Simply type ` + c.displayName() + ` help [path to command] for full details.`,
+ ValidArgsFunction: func(c *Command, args []string, toComplete string) ([]string, ShellCompDirective) {
+ var completions []string
+ cmd, _, e := c.Root().Find(args)
+ if e != nil {
+ return nil, ShellCompDirectiveNoFileComp
+ }
+ if cmd == nil {
+ // Root help command.
+ cmd = c.Root()
+ }
+ for _, subCmd := range cmd.Commands() {
+ if subCmd.IsAvailableCommand() || subCmd == cmd.helpCommand {
+ if strings.HasPrefix(subCmd.Name(), toComplete) {
+ completions = append(completions, fmt.Sprintf("%s\t%s", subCmd.Name(), subCmd.Short))
+ }
+ }
+ }
+ return completions, ShellCompDirectiveNoFileComp
+ },
+ Run: func(c *Command, args []string) {
+ cmd, _, e := c.Root().Find(args)
+ if cmd == nil || e != nil {
+ c.Printf("Unknown help topic %#q\n", args)
+ CheckErr(c.Root().Usage())
+ } else {
+ cmd.InitDefaultHelpFlag() // make possible 'help' flag to be shown
+ cmd.InitDefaultVersionFlag() // make possible 'version' flag to be shown
+ CheckErr(cmd.Help())
+ }
+ },
+ GroupID: c.helpCommandGroupID,
+ }
+ }
+ c.RemoveCommand(c.helpCommand)
+ c.AddCommand(c.helpCommand)
+}
+
+// ResetCommands deletes the parent, subcommands, and help command from c.
+func (c *Command) ResetCommands() {
+ c.parent = nil
+ c.commands = nil
+ c.helpCommand = nil
+ c.parentsPflags = nil
+}
+
+// Sorts commands by their names.
+type commandSorterByName []*Command
+
+func (c commandSorterByName) Len() int { return len(c) }
+func (c commandSorterByName) Swap(i, j int) { c[i], c[j] = c[j], c[i] }
+func (c commandSorterByName) Less(i, j int) bool { return c[i].Name() < c[j].Name() }
+
+// Commands returns a sorted slice of child commands.
+func (c *Command) Commands() []*Command {
+ // do not sort commands if they are already sorted or sorting was disabled
+ if EnableCommandSorting && !c.commandsAreSorted {
+ sort.Sort(commandSorterByName(c.commands))
+ c.commandsAreSorted = true
+ }
+ return c.commands
+}
+
+// AddCommand adds one or more commands to this parent command.
+func (c *Command) AddCommand(cmds ...*Command) {
+ for i, x := range cmds {
+ if cmds[i] == c {
+ panic("Command can't be a child of itself")
+ }
+ cmds[i].parent = c
+ // update max lengths
+ usageLen := len(x.Use)
+ if usageLen > c.commandsMaxUseLen {
+ c.commandsMaxUseLen = usageLen
+ }
+ commandPathLen := len(x.CommandPath())
+ if commandPathLen > c.commandsMaxCommandPathLen {
+ c.commandsMaxCommandPathLen = commandPathLen
+ }
+ nameLen := len(x.Name())
+ if nameLen > c.commandsMaxNameLen {
+ c.commandsMaxNameLen = nameLen
+ }
+ // If global normalization function exists, update all children
+ if c.globNormFunc != nil {
+ x.SetGlobalNormalizationFunc(c.globNormFunc)
+ }
+ c.commands = append(c.commands, x)
+ c.commandsAreSorted = false
+ }
+}
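+
+// Illustrative usage sketch (hypothetical names, not upstream documentation):
+// building a small command tree with AddCommand. It wires up the parent
+// pointer, so list.Parent() returns root afterwards, and adding a command as
+// a child of itself panics.
+//
+//	root := &cobra.Command{Use: "app"}
+//	list := &cobra.Command{Use: "list", Run: func(cmd *cobra.Command, args []string) {}}
+//	root.AddCommand(list)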
+
+// Groups returns a slice of child command groups.
+func (c *Command) Groups() []*Group {
+ return c.commandgroups
+}
+
+// AllChildCommandsHaveGroup returns whether all subcommands are assigned to a group.
+func (c *Command) AllChildCommandsHaveGroup() bool {
+ for _, sub := range c.commands {
+ if (sub.IsAvailableCommand() || sub == c.helpCommand) && sub.GroupID == "" {
+ return false
+ }
+ }
+ return true
+}
+
+// ContainsGroup returns whether groupID exists in the list of command groups.
+func (c *Command) ContainsGroup(groupID string) bool {
+ for _, x := range c.commandgroups {
+ if x.ID == groupID {
+ return true
+ }
+ }
+ return false
+}
+
+// AddGroup adds one or more command groups to this parent command.
+func (c *Command) AddGroup(groups ...*Group) {
+ c.commandgroups = append(c.commandgroups, groups...)
+}
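+
+// Illustrative sketch (hypothetical group ID): a group is registered on the
+// parent and referenced by each subcommand's GroupID; a GroupID that was never
+// registered makes checkCommandGroups panic at execute time.
+//
+//	root.AddGroup(&cobra.Group{ID: "mgmt", Title: "Management Commands:"})
+//	root.AddCommand(&cobra.Command{Use: "sync", GroupID: "mgmt", Run: func(cmd *cobra.Command, args []string) {}})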
+
+// RemoveCommand removes one or more commands from a parent command.
+func (c *Command) RemoveCommand(cmds ...*Command) {
+ commands := []*Command{}
+main:
+ for _, command := range c.commands {
+ for _, cmd := range cmds {
+ if command == cmd {
+ command.parent = nil
+ continue main
+ }
+ }
+ commands = append(commands, command)
+ }
+ c.commands = commands
+ // recompute all lengths
+ c.commandsMaxUseLen = 0
+ c.commandsMaxCommandPathLen = 0
+ c.commandsMaxNameLen = 0
+ for _, command := range c.commands {
+ usageLen := len(command.Use)
+ if usageLen > c.commandsMaxUseLen {
+ c.commandsMaxUseLen = usageLen
+ }
+ commandPathLen := len(command.CommandPath())
+ if commandPathLen > c.commandsMaxCommandPathLen {
+ c.commandsMaxCommandPathLen = commandPathLen
+ }
+ nameLen := len(command.Name())
+ if nameLen > c.commandsMaxNameLen {
+ c.commandsMaxNameLen = nameLen
+ }
+ }
+}
+
+// Print is a convenience method to Print to the defined output, falling back to Stderr if not set.
+func (c *Command) Print(i ...interface{}) {
+ fmt.Fprint(c.OutOrStderr(), i...)
+}
+
+// Println is a convenience method to Println to the defined output, falling back to Stderr if not set.
+func (c *Command) Println(i ...interface{}) {
+ c.Print(fmt.Sprintln(i...))
+}
+
+// Printf is a convenience method to Printf to the defined output, falling back to Stderr if not set.
+func (c *Command) Printf(format string, i ...interface{}) {
+ c.Print(fmt.Sprintf(format, i...))
+}
+
+// PrintErr is a convenience method to Print to the defined Err output, falling back to Stderr if not set.
+func (c *Command) PrintErr(i ...interface{}) {
+ fmt.Fprint(c.ErrOrStderr(), i...)
+}
+
+// PrintErrln is a convenience method to Println to the defined Err output, falling back to Stderr if not set.
+func (c *Command) PrintErrln(i ...interface{}) {
+ c.PrintErr(fmt.Sprintln(i...))
+}
+
+// PrintErrf is a convenience method to Printf to the defined Err output, falling back to Stderr if not set.
+func (c *Command) PrintErrf(format string, i ...interface{}) {
+ c.PrintErr(fmt.Sprintf(format, i...))
+}
+
+// CommandPath returns the full path to this command.
+func (c *Command) CommandPath() string {
+ if c.HasParent() {
+ return c.Parent().CommandPath() + " " + c.Name()
+ }
+ return c.displayName()
+}
+
+func (c *Command) displayName() string {
+ if displayName, ok := c.Annotations[CommandDisplayNameAnnotation]; ok {
+ return displayName
+ }
+ return c.Name()
+}
+
+// UseLine puts out the full usage for a given command (including parents).
+func (c *Command) UseLine() string {
+ var useline string
+ use := strings.Replace(c.Use, c.Name(), c.displayName(), 1)
+ if c.HasParent() {
+ useline = c.parent.CommandPath() + " " + use
+ } else {
+ useline = use
+ }
+ if c.DisableFlagsInUseLine {
+ return useline
+ }
+ if c.HasAvailableFlags() && !strings.Contains(useline, "[flags]") {
+ useline += " [flags]"
+ }
+ return useline
+}
+
+// DebugFlags is used to determine which flags have been assigned to which commands
+// and which persist.
+func (c *Command) DebugFlags() {
+ c.Println("DebugFlags called on", c.Name())
+ var debugflags func(*Command)
+
+ debugflags = func(x *Command) {
+ if x.HasFlags() || x.HasPersistentFlags() {
+ c.Println(x.Name())
+ }
+ if x.HasFlags() {
+ x.flags.VisitAll(func(f *flag.Flag) {
+ if x.HasPersistentFlags() && x.persistentFlag(f.Name) != nil {
+ c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [LP]")
+ } else {
+ c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [L]")
+ }
+ })
+ }
+ if x.HasPersistentFlags() {
+ x.pflags.VisitAll(func(f *flag.Flag) {
+ if x.HasFlags() {
+ if x.flags.Lookup(f.Name) == nil {
+ c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [P]")
+ }
+ } else {
+ c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [P]")
+ }
+ })
+ }
+ c.Println(x.flagErrorBuf)
+ if x.HasSubCommands() {
+ for _, y := range x.commands {
+ debugflags(y)
+ }
+ }
+ }
+
+ debugflags(c)
+}
+
+// Name returns the command's name: the first word in the use line.
+func (c *Command) Name() string {
+ name := c.Use
+ i := strings.Index(name, " ")
+ if i >= 0 {
+ name = name[:i]
+ }
+ return name
+}
+
+// HasAlias determines if a given string is an alias of the command.
+func (c *Command) HasAlias(s string) bool {
+ for _, a := range c.Aliases {
+ if commandNameMatches(a, s) {
+ return true
+ }
+ }
+ return false
+}
+
+// CalledAs returns the command name or alias that was used to invoke
+// this command or an empty string if the command has not been called.
+func (c *Command) CalledAs() string {
+ if c.commandCalledAs.called {
+ return c.commandCalledAs.name
+ }
+ return ""
+}
+
+// hasNameOrAliasPrefix returns true if the Name or any of the aliases starts
+// with prefix.
+func (c *Command) hasNameOrAliasPrefix(prefix string) bool {
+ if strings.HasPrefix(c.Name(), prefix) {
+ c.commandCalledAs.name = c.Name()
+ return true
+ }
+ for _, alias := range c.Aliases {
+ if strings.HasPrefix(alias, prefix) {
+ c.commandCalledAs.name = alias
+ return true
+ }
+ }
+ return false
+}
+
+// NameAndAliases returns a comma-separated list of the command name and all aliases.
+func (c *Command) NameAndAliases() string {
+ return strings.Join(append([]string{c.Name()}, c.Aliases...), ", ")
+}
+
+// HasExample determines if the command has an example.
+func (c *Command) HasExample() bool {
+ return len(c.Example) > 0
+}
+
+// Runnable determines if the command is itself runnable.
+func (c *Command) Runnable() bool {
+ return c.Run != nil || c.RunE != nil
+}
+
+// HasSubCommands determines if the command has children commands.
+func (c *Command) HasSubCommands() bool {
+ return len(c.commands) > 0
+}
+
+// IsAvailableCommand determines if a command is available as a non-help command
+// (this includes all non deprecated/hidden commands).
+func (c *Command) IsAvailableCommand() bool {
+ if len(c.Deprecated) != 0 || c.Hidden {
+ return false
+ }
+
+ if c.HasParent() && c.Parent().helpCommand == c {
+ return false
+ }
+
+ if c.Runnable() || c.HasAvailableSubCommands() {
+ return true
+ }
+
+ return false
+}
+
+// IsAdditionalHelpTopicCommand determines if a command is an additional
+// help topic command; an additional help topic command is determined by the
+// fact that it is NOT runnable/hidden/deprecated, and has no sub commands that
+// are runnable/hidden/deprecated.
+// Concrete example: https://github.com/spf13/cobra/issues/393#issuecomment-282741924.
+func (c *Command) IsAdditionalHelpTopicCommand() bool {
+ // if a command is runnable, deprecated, or hidden it is not a 'help' command
+ if c.Runnable() || len(c.Deprecated) != 0 || c.Hidden {
+ return false
+ }
+
+ // if any non-help sub commands are found, the command is not a 'help' command
+ for _, sub := range c.commands {
+ if !sub.IsAdditionalHelpTopicCommand() {
+ return false
+ }
+ }
+
+ // the command either has no sub commands, or no non-help sub commands
+ return true
+}
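+
+// Illustrative sketch (hypothetical topic name): a pure help topic command
+// carries only documentation; it has no Run/RunE and no runnable subcommands,
+// so it is listed under "Additional help topics" rather than as a regular
+// available command.
+//
+//	root.AddCommand(&cobra.Command{Use: "environment", Short: "Environment variables", Long: "..."})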
+
+// HasHelpSubCommands determines if a command has any available 'help' sub commands
+// that need to be shown in the usage/help default template under 'additional help
+// topics'.
+func (c *Command) HasHelpSubCommands() bool {
+ // return true on the first found available 'help' sub command
+ for _, sub := range c.commands {
+ if sub.IsAdditionalHelpTopicCommand() {
+ return true
+ }
+ }
+
+ // the command either has no sub commands, or no available 'help' sub commands
+ return false
+}
+
+// HasAvailableSubCommands determines if a command has available sub commands that
+// need to be shown in the usage/help default template under 'available commands'.
+func (c *Command) HasAvailableSubCommands() bool {
+ // return true on the first found available (non deprecated/help/hidden)
+ // sub command
+ for _, sub := range c.commands {
+ if sub.IsAvailableCommand() {
+ return true
+ }
+ }
+
+ // the command either has no sub commands, or no available (non deprecated/help/hidden)
+ // sub commands
+ return false
+}
+
+// HasParent determines if the command is a child command.
+func (c *Command) HasParent() bool {
+ return c.parent != nil
+}
+
+// GlobalNormalizationFunc returns the global normalization function or nil if it doesn't exist.
+func (c *Command) GlobalNormalizationFunc() func(f *flag.FlagSet, name string) flag.NormalizedName {
+ return c.globNormFunc
+}
+
+// Flags returns the complete FlagSet that applies
+// to this command (local and persistent declared here and by all parents).
+func (c *Command) Flags() *flag.FlagSet {
+ if c.flags == nil {
+ c.flags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError)
+ if c.flagErrorBuf == nil {
+ c.flagErrorBuf = new(bytes.Buffer)
+ }
+ c.flags.SetOutput(c.flagErrorBuf)
+ }
+
+ return c.flags
+}
+
+// LocalNonPersistentFlags are flags specific to this command which will NOT persist to subcommands.
+// This function does not modify the flags of the current command; its purpose is to return the current state.
+func (c *Command) LocalNonPersistentFlags() *flag.FlagSet {
+ persistentFlags := c.PersistentFlags()
+
+ out := flag.NewFlagSet(c.displayName(), flag.ContinueOnError)
+ c.LocalFlags().VisitAll(func(f *flag.Flag) {
+ if persistentFlags.Lookup(f.Name) == nil {
+ out.AddFlag(f)
+ }
+ })
+ return out
+}
+
+// LocalFlags returns the local FlagSet specifically set in the current command.
+// This function does not modify the flags of the current command; its purpose is to return the current state.
+func (c *Command) LocalFlags() *flag.FlagSet {
+ c.mergePersistentFlags()
+
+ if c.lflags == nil {
+ c.lflags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError)
+ if c.flagErrorBuf == nil {
+ c.flagErrorBuf = new(bytes.Buffer)
+ }
+ c.lflags.SetOutput(c.flagErrorBuf)
+ }
+ c.lflags.SortFlags = c.Flags().SortFlags
+ if c.globNormFunc != nil {
+ c.lflags.SetNormalizeFunc(c.globNormFunc)
+ }
+
+ addToLocal := func(f *flag.Flag) {
+ // Add the flag if it is not a parent PFlag, or it shadows a parent PFlag
+ if c.lflags.Lookup(f.Name) == nil && f != c.parentsPflags.Lookup(f.Name) {
+ c.lflags.AddFlag(f)
+ }
+ }
+ c.Flags().VisitAll(addToLocal)
+ c.PersistentFlags().VisitAll(addToLocal)
+ return c.lflags
+}
+
+// InheritedFlags returns all flags which were inherited from parent commands.
+// This function does not modify the flags of the current command; its purpose is to return the current state.
+func (c *Command) InheritedFlags() *flag.FlagSet {
+ c.mergePersistentFlags()
+
+ if c.iflags == nil {
+ c.iflags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError)
+ if c.flagErrorBuf == nil {
+ c.flagErrorBuf = new(bytes.Buffer)
+ }
+ c.iflags.SetOutput(c.flagErrorBuf)
+ }
+
+ local := c.LocalFlags()
+ if c.globNormFunc != nil {
+ c.iflags.SetNormalizeFunc(c.globNormFunc)
+ }
+
+ c.parentsPflags.VisitAll(func(f *flag.Flag) {
+ if c.iflags.Lookup(f.Name) == nil && local.Lookup(f.Name) == nil {
+ c.iflags.AddFlag(f)
+ }
+ })
+ return c.iflags
+}
+
+// NonInheritedFlags returns all flags which were not inherited from parent commands.
+// This function does not modify the flags of the current command; its purpose is to return the current state.
+func (c *Command) NonInheritedFlags() *flag.FlagSet {
+ return c.LocalFlags()
+}
+
+// PersistentFlags returns the persistent FlagSet specifically set in the current command.
+func (c *Command) PersistentFlags() *flag.FlagSet {
+ if c.pflags == nil {
+ c.pflags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError)
+ if c.flagErrorBuf == nil {
+ c.flagErrorBuf = new(bytes.Buffer)
+ }
+ c.pflags.SetOutput(c.flagErrorBuf)
+ }
+ return c.pflags
+}
+
+// ResetFlags deletes all flags from command.
+func (c *Command) ResetFlags() {
+ c.flagErrorBuf = new(bytes.Buffer)
+ c.flagErrorBuf.Reset()
+ c.flags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError)
+ c.flags.SetOutput(c.flagErrorBuf)
+ c.pflags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError)
+ c.pflags.SetOutput(c.flagErrorBuf)
+
+ c.lflags = nil
+ c.iflags = nil
+ c.parentsPflags = nil
+}
+
+// HasFlags checks if the command contains any flags (local plus persistent from the entire structure).
+func (c *Command) HasFlags() bool {
+ return c.Flags().HasFlags()
+}
+
+// HasPersistentFlags checks if the command contains persistent flags.
+func (c *Command) HasPersistentFlags() bool {
+ return c.PersistentFlags().HasFlags()
+}
+
+// HasLocalFlags checks if the command has flags specifically declared locally.
+func (c *Command) HasLocalFlags() bool {
+ return c.LocalFlags().HasFlags()
+}
+
+// HasInheritedFlags checks if the command has flags inherited from its parent command.
+func (c *Command) HasInheritedFlags() bool {
+ return c.InheritedFlags().HasFlags()
+}
+
+// HasAvailableFlags checks if the command contains any flags (local plus persistent from the entire
+// structure) which are not hidden or deprecated.
+func (c *Command) HasAvailableFlags() bool {
+ return c.Flags().HasAvailableFlags()
+}
+
+// HasAvailablePersistentFlags checks if the command contains persistent flags which are not hidden or deprecated.
+func (c *Command) HasAvailablePersistentFlags() bool {
+ return c.PersistentFlags().HasAvailableFlags()
+}
+
+// HasAvailableLocalFlags checks if the command has flags specifically declared locally which are not hidden
+// or deprecated.
+func (c *Command) HasAvailableLocalFlags() bool {
+ return c.LocalFlags().HasAvailableFlags()
+}
+
+// HasAvailableInheritedFlags checks if the command has flags inherited from its parent command which are
+// not hidden or deprecated.
+func (c *Command) HasAvailableInheritedFlags() bool {
+ return c.InheritedFlags().HasAvailableFlags()
+}
+
+// Flag climbs up the command tree looking for a matching flag.
+func (c *Command) Flag(name string) (flag *flag.Flag) {
+ flag = c.Flags().Lookup(name)
+
+ if flag == nil {
+ flag = c.persistentFlag(name)
+ }
+
+ return
+}
+
+// Recursively find a matching persistent flag.
+func (c *Command) persistentFlag(name string) (flag *flag.Flag) {
+ if c.HasPersistentFlags() {
+ flag = c.PersistentFlags().Lookup(name)
+ }
+
+ if flag == nil {
+ c.updateParentsPflags()
+ flag = c.parentsPflags.Lookup(name)
+ }
+ return
+}
+
+// ParseFlags parses persistent flag tree and local flags.
+func (c *Command) ParseFlags(args []string) error {
+ if c.DisableFlagParsing {
+ return nil
+ }
+
+ if c.flagErrorBuf == nil {
+ c.flagErrorBuf = new(bytes.Buffer)
+ }
+ beforeErrorBufLen := c.flagErrorBuf.Len()
+ c.mergePersistentFlags()
+
+ // do it here after merging all flags and just before parse
+ c.Flags().ParseErrorsWhitelist = flag.ParseErrorsWhitelist(c.FParseErrWhitelist)
+
+ err := c.Flags().Parse(args)
+ // Print warnings if they occurred (e.g. deprecated flag messages).
+ if c.flagErrorBuf.Len()-beforeErrorBufLen > 0 && err == nil {
+ c.Print(c.flagErrorBuf.String())
+ }
+
+ return err
+}
+
+// Parent returns a command's parent command.
+func (c *Command) Parent() *Command {
+ return c.parent
+}
+
+// mergePersistentFlags merges c.PersistentFlags() to c.Flags()
+// and adds missing persistent flags of all parents.
+func (c *Command) mergePersistentFlags() {
+ c.updateParentsPflags()
+ c.Flags().AddFlagSet(c.PersistentFlags())
+ c.Flags().AddFlagSet(c.parentsPflags)
+}
+
+// updateParentsPflags updates c.parentsPflags by adding
+// new persistent flags of all parents.
+// If c.parentsPflags == nil, it creates a new FlagSet.
+func (c *Command) updateParentsPflags() {
+ if c.parentsPflags == nil {
+ c.parentsPflags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError)
+ c.parentsPflags.SetOutput(c.flagErrorBuf)
+ c.parentsPflags.SortFlags = false
+ }
+
+ if c.globNormFunc != nil {
+ c.parentsPflags.SetNormalizeFunc(c.globNormFunc)
+ }
+
+ c.Root().PersistentFlags().AddFlagSet(flag.CommandLine)
+
+ c.VisitParents(func(parent *Command) {
+ c.parentsPflags.AddFlagSet(parent.PersistentFlags())
+ })
+}
+
+// commandNameMatches checks if two command names are equal
+// taking into account case sensitivity according to
+// EnableCaseInsensitive global configuration.
+func commandNameMatches(s string, t string) bool {
+ if EnableCaseInsensitive {
+ return strings.EqualFold(s, t)
+ }
+
+ return s == t
+}
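+
+// Illustrative: with the global EnableCaseInsensitive set to true, an input of
+// "STATUS" matches a command named "status"; by default the comparison is exact.
+//
+//	cobra.EnableCaseInsensitive = true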
diff --git a/vendor/github.com/spf13/cobra/command_notwin.go b/vendor/github.com/spf13/cobra/command_notwin.go
new file mode 100644
index 000000000..307f0c127
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/command_notwin.go
@@ -0,0 +1,20 @@
+// Copyright 2013-2023 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !windows
+// +build !windows
+
+package cobra
+
+var preExecHookFn func(*Command)
diff --git a/vendor/github.com/spf13/cobra/command_win.go b/vendor/github.com/spf13/cobra/command_win.go
new file mode 100644
index 000000000..adbef395c
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/command_win.go
@@ -0,0 +1,41 @@
+// Copyright 2013-2023 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build windows
+// +build windows
+
+package cobra
+
+import (
+ "fmt"
+ "os"
+ "time"
+
+ "github.com/inconshreveable/mousetrap"
+)
+
+var preExecHookFn = preExecHook
+
+func preExecHook(c *Command) {
+ if MousetrapHelpText != "" && mousetrap.StartedByExplorer() {
+ c.Print(MousetrapHelpText)
+ if MousetrapDisplayDuration > 0 {
+ time.Sleep(MousetrapDisplayDuration)
+ } else {
+ c.Println("Press return to continue...")
+ fmt.Scanln()
+ }
+ os.Exit(1)
+ }
+}
diff --git a/vendor/github.com/spf13/cobra/completions.go b/vendor/github.com/spf13/cobra/completions.go
new file mode 100644
index 000000000..c0c08b057
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/completions.go
@@ -0,0 +1,939 @@
+// Copyright 2013-2023 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cobra
+
+import (
+ "fmt"
+ "os"
+ "regexp"
+ "strconv"
+ "strings"
+ "sync"
+
+ "github.com/spf13/pflag"
+)
+
+const (
+ // ShellCompRequestCmd is the name of the hidden command that is used to request
+ // completion results from the program. It is used by the shell completion scripts.
+ ShellCompRequestCmd = "__complete"
+ // ShellCompNoDescRequestCmd is the name of the hidden command that is used to request
+ // completion results without their description. It is used by the shell completion scripts.
+ ShellCompNoDescRequestCmd = "__completeNoDesc"
+)
+
+// Global map of flag completion functions. Make sure to use flagCompletionMutex before you read from or write to it.
+var flagCompletionFunctions = map[*pflag.Flag]func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective){}
+
+// lock for reading and writing from flagCompletionFunctions
+var flagCompletionMutex = &sync.RWMutex{}
+
+// ShellCompDirective is a bit map representing the different behaviors the shell
+// can be instructed to have once completions have been provided.
+type ShellCompDirective int
+
+type flagCompError struct {
+ subCommand string
+ flagName string
+}
+
+func (e *flagCompError) Error() string {
+ return "Subcommand '" + e.subCommand + "' does not support flag '" + e.flagName + "'"
+}
+
+const (
+ // ShellCompDirectiveError indicates an error occurred and completions should be ignored.
+ ShellCompDirectiveError ShellCompDirective = 1 << iota
+
+ // ShellCompDirectiveNoSpace indicates that the shell should not add a space
+ // after the completion even if there is a single completion provided.
+ ShellCompDirectiveNoSpace
+
+ // ShellCompDirectiveNoFileComp indicates that the shell should not provide
+ // file completion even when no completion is provided.
+ ShellCompDirectiveNoFileComp
+
+ // ShellCompDirectiveFilterFileExt indicates that the provided completions
+ // should be used as file extension filters.
+ // For flags, using Command.MarkFlagFilename() and Command.MarkPersistentFlagFilename()
+ // is a shortcut to using this directive explicitly. The BashCompFilenameExt
+ // annotation can also be used to obtain the same behavior for flags.
+ ShellCompDirectiveFilterFileExt
+
+ // ShellCompDirectiveFilterDirs indicates that only directory names should
+ // be provided in file completion. To request directory names within another
+ // directory, the returned completions should specify the directory within
+ // which to search. The BashCompSubdirsInDir annotation can be used to
+ // obtain the same behavior but only for flags.
+ ShellCompDirectiveFilterDirs
+
+ // ShellCompDirectiveKeepOrder indicates that the shell should preserve the order
+ // in which the completions are provided
+ ShellCompDirectiveKeepOrder
+
+ // ===========================================================================
+
+ // All directives using iota should be above this one.
+ // For internal use.
+ shellCompDirectiveMaxValue
+
+ // ShellCompDirectiveDefault indicates to let the shell perform its default
+ // behavior after completions have been provided.
+ // This one must be last to avoid messing up the iota count.
+ ShellCompDirectiveDefault ShellCompDirective = 0
+)
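+
+// The directives are bit flags and can be combined with a bitwise OR when
+// several behaviors are wanted at once; e.g. (illustrative) a completion
+// function may return:
+//
+//	return []string{"json", "yaml"}, ShellCompDirectiveNoFileComp | ShellCompDirectiveKeepOrder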
+
+const (
+ // Constants for the completion command
+ compCmdName = "completion"
+ compCmdNoDescFlagName = "no-descriptions"
+ compCmdNoDescFlagDesc = "disable completion descriptions"
+ compCmdNoDescFlagDefault = false
+)
+
+// CompletionOptions are the options to control shell completion
+type CompletionOptions struct {
+ // DisableDefaultCmd prevents Cobra from creating a default 'completion' command
+ DisableDefaultCmd bool
+ // DisableNoDescFlag prevents Cobra from creating the '--no-descriptions' flag
+ // for shells that support completion descriptions
+ DisableNoDescFlag bool
+ // DisableDescriptions turns off all completion descriptions for shells
+ // that support them
+ DisableDescriptions bool
+ // HiddenDefaultCmd makes the default 'completion' command hidden
+ HiddenDefaultCmd bool
+}
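+
+// Illustrative sketch (hypothetical rootCmd): the options are set on the root
+// command before Execute, e.g. to ship a program without the generated
+// 'completion' subcommand:
+//
+//	rootCmd.CompletionOptions = cobra.CompletionOptions{DisableDefaultCmd: true}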
+
+// NoFileCompletions can be used to disable file completion for commands that should
+// not trigger file completions.
+func NoFileCompletions(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) {
+ return nil, ShellCompDirectiveNoFileComp
+}
+
+// FixedCompletions can be used to create a completion function which always
+// returns the same results.
+func FixedCompletions(choices []string, directive ShellCompDirective) func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) {
+ return func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) {
+ return choices, directive
+ }
+}
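+
+// Illustrative: a command whose single positional argument always comes from a
+// fixed set can use FixedCompletions directly as its ValidArgsFunction:
+//
+//	cmd.ValidArgsFunction = cobra.FixedCompletions([]string{"start", "stop"}, cobra.ShellCompDirectiveNoFileComp)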
+
+// RegisterFlagCompletionFunc should be called to register a function to provide completion for a flag.
+func (c *Command) RegisterFlagCompletionFunc(flagName string, f func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective)) error {
+ flag := c.Flag(flagName)
+ if flag == nil {
+ return fmt.Errorf("RegisterFlagCompletionFunc: flag '%s' does not exist", flagName)
+ }
+ flagCompletionMutex.Lock()
+ defer flagCompletionMutex.Unlock()
+
+ if _, exists := flagCompletionFunctions[flag]; exists {
+ return fmt.Errorf("RegisterFlagCompletionFunc: flag '%s' already registered", flagName)
+ }
+ flagCompletionFunctions[flag] = f
+ return nil
+}
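+
+// Illustrative usage sketch (hypothetical "output" flag): the registered
+// function receives the partially typed value in toComplete and returns the
+// candidates plus a directive.
+//
+//	cmd.Flags().String("output", "table", "output format")
+//	_ = cmd.RegisterFlagCompletionFunc("output",
+//		func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+//			return []string{"json", "yaml", "table"}, cobra.ShellCompDirectiveNoFileComp
+//		})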
+
+// GetFlagCompletionFunc returns the completion function for the given flag of the command, if available.
+func (c *Command) GetFlagCompletionFunc(flagName string) (func(*Command, []string, string) ([]string, ShellCompDirective), bool) {
+ flag := c.Flag(flagName)
+ if flag == nil {
+ return nil, false
+ }
+
+ flagCompletionMutex.RLock()
+ defer flagCompletionMutex.RUnlock()
+
+ completionFunc, exists := flagCompletionFunctions[flag]
+ return completionFunc, exists
+}
+
+// Returns a string listing the different directives enabled in the specified parameter.
+func (d ShellCompDirective) string() string {
+ var directives []string
+ if d&ShellCompDirectiveError != 0 {
+ directives = append(directives, "ShellCompDirectiveError")
+ }
+ if d&ShellCompDirectiveNoSpace != 0 {
+ directives = append(directives, "ShellCompDirectiveNoSpace")
+ }
+ if d&ShellCompDirectiveNoFileComp != 0 {
+ directives = append(directives, "ShellCompDirectiveNoFileComp")
+ }
+ if d&ShellCompDirectiveFilterFileExt != 0 {
+ directives = append(directives, "ShellCompDirectiveFilterFileExt")
+ }
+ if d&ShellCompDirectiveFilterDirs != 0 {
+ directives = append(directives, "ShellCompDirectiveFilterDirs")
+ }
+ if d&ShellCompDirectiveKeepOrder != 0 {
+ directives = append(directives, "ShellCompDirectiveKeepOrder")
+ }
+ if len(directives) == 0 {
+ directives = append(directives, "ShellCompDirectiveDefault")
+ }
+
+ if d >= shellCompDirectiveMaxValue {
+ return fmt.Sprintf("ERROR: unexpected ShellCompDirective value: %d", d)
+ }
+ return strings.Join(directives, ", ")
+}
+
+// initCompleteCmd adds a special hidden command that can be used to request custom completions.
+func (c *Command) initCompleteCmd(args []string) {
+ completeCmd := &Command{
+ Use: fmt.Sprintf("%s [command-line]", ShellCompRequestCmd),
+ Aliases: []string{ShellCompNoDescRequestCmd},
+ DisableFlagsInUseLine: true,
+ Hidden: true,
+ DisableFlagParsing: true,
+ Args: MinimumNArgs(1),
+ Short: "Request shell completion choices for the specified command-line",
+ Long: fmt.Sprintf("%[2]s is a special command that is used by the shell completion logic\n%[1]s",
+ "to request completion choices for the specified command-line.", ShellCompRequestCmd),
+ Run: func(cmd *Command, args []string) {
+ finalCmd, completions, directive, err := cmd.getCompletions(args)
+ if err != nil {
+ CompErrorln(err.Error())
+ // Keep going for multiple reasons:
+ // 1- There could be some valid completions even though there was an error
+ // 2- Even without completions, we need to print the directive
+ }
+
+ noDescriptions := cmd.CalledAs() == ShellCompNoDescRequestCmd
+ if !noDescriptions {
+ if doDescriptions, err := strconv.ParseBool(getEnvConfig(cmd, configEnvVarSuffixDescriptions)); err == nil {
+ noDescriptions = !doDescriptions
+ }
+ }
+ noActiveHelp := GetActiveHelpConfig(finalCmd) == activeHelpGlobalDisable
+ out := finalCmd.OutOrStdout()
+ for _, comp := range completions {
+ if noActiveHelp && strings.HasPrefix(comp, activeHelpMarker) {
+ // Remove all activeHelp entries if it's disabled.
+ continue
+ }
+ if noDescriptions {
+ // Remove any description that may be included following a tab character.
+ comp = strings.SplitN(comp, "\t", 2)[0]
+ }
+
+ // Make sure we only write the first line to the output.
+ // This is needed if a description contains a linebreak.
+ // Otherwise the shell scripts will interpret the other lines as new flags
+ // and could therefore provide a wrong completion.
+ comp = strings.SplitN(comp, "\n", 2)[0]
+
+ // Finally trim the completion. This is especially important to get rid
+ // of a trailing tab when there is no description following it.
+ // For example, a sub-command without a description should not be completed
+ // with a tab at the end (or else zsh will show a -- following it
+ // although there is no description).
+ comp = strings.TrimSpace(comp)
+
+ // Print each possible completion to the output for the completion script to consume.
+ fmt.Fprintln(out, comp)
+ }
+
+ // As the last printout, print the completion directive for the completion script to parse.
+ // The directive integer must be the last character, following a single colon (:).
+ // The completion script expects :<directive>
+ fmt.Fprintf(out, ":%d\n", directive)
+
+ // Print some helpful info to stderr for the user to understand.
+ // Output from stderr must be ignored by the completion script.
+ fmt.Fprintf(finalCmd.ErrOrStderr(), "Completion ended with directive: %s\n", directive.string())
+ },
+ }
+ c.AddCommand(completeCmd)
+ subCmd, _, err := c.Find(args)
+ if err != nil || subCmd.Name() != ShellCompRequestCmd {
+ // Only create this special command if it is actually being called.
+ // This reduces possible side-effects of creating such a command;
+ // for example, having this command would cause problems to a
+ // cobra program that only consists of the root command, since this
+ // command would cause the root command to suddenly have a subcommand.
+ c.RemoveCommand(completeCmd)
+ }
+}
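+
+// Illustrative: the hidden command can be exercised by hand when debugging a
+// completion script (hypothetical program and subcommand names); candidates
+// are printed one per line, followed by the directive after a colon:
+//
+//	$ app __complete get ""
+//	alpha	first resource
+//	:4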
+
+func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDirective, error) {
+ // The last argument, which is not completely typed by the user,
+ // should not be part of the list of arguments
+ toComplete := args[len(args)-1]
+ trimmedArgs := args[:len(args)-1]
+
+ var finalCmd *Command
+ var finalArgs []string
+ var err error
+ // Find the real command for which completion must be performed
+ // check if we need to traverse here to parse local flags on parent commands
+ if c.Root().TraverseChildren {
+ finalCmd, finalArgs, err = c.Root().Traverse(trimmedArgs)
+ } else {
+ // For Root commands that don't specify any value for their Args fields, when we call
+ // Find(), if those Root commands don't have any sub-commands, they will accept arguments.
+ // However, because we have added the __complete sub-command in the current code path, the
+ // call to Find() -> legacyArgs() will return an error if there are any arguments.
+ // To avoid this, we first remove the __complete command to get back to having no sub-commands.
+ rootCmd := c.Root()
+ if len(rootCmd.Commands()) == 1 {
+ rootCmd.RemoveCommand(c)
+ }
+
+ finalCmd, finalArgs, err = rootCmd.Find(trimmedArgs)
+ }
+ if err != nil {
+ // Unable to find the real command. E.g., someInvalidCmd
+ return c, []string{}, ShellCompDirectiveDefault, fmt.Errorf("unable to find a command for arguments: %v", trimmedArgs)
+ }
+ finalCmd.ctx = c.ctx
+
+ // These flags are normally added when `execute()` is called on `finalCmd`,
+ // however, when doing completion, we don't call `finalCmd.execute()`.
+ // Let's add the --help and --version flag ourselves but only if the finalCmd
+ // has not disabled flag parsing; if flag parsing is disabled, it is up to the
+ // finalCmd itself to handle the completion of *all* flags.
+ if !finalCmd.DisableFlagParsing {
+ finalCmd.InitDefaultHelpFlag()
+ finalCmd.InitDefaultVersionFlag()
+ }
+
+ // Check if we are doing flag value completion before parsing the flags.
+ // This is important because if we are completing a flag value, we need to also
+ // remove the flag name argument from the list of finalArgs or else the parsing
+ // could fail due to an invalid value (incomplete) for the flag.
+ flag, finalArgs, toComplete, flagErr := checkIfFlagCompletion(finalCmd, finalArgs, toComplete)
+
+ // Check if interspersed is false or -- was set on a previous arg.
+ // This works by counting the arguments. Normally -- is not counted as an arg, but
+ // if -- was already set, or interspersed is false and there is already one arg, then
+ // the extra added -- is counted as an arg.
+ flagCompletion := true
+ _ = finalCmd.ParseFlags(append(finalArgs, "--"))
+ newArgCount := finalCmd.Flags().NArg()
+
+ // Parse the flags early so we can check if required flags are set
+ if err = finalCmd.ParseFlags(finalArgs); err != nil {
+ return finalCmd, []string{}, ShellCompDirectiveDefault, fmt.Errorf("Error while parsing flags from args %v: %s", finalArgs, err.Error())
+ }
+
+ realArgCount := finalCmd.Flags().NArg()
+ if newArgCount > realArgCount {
+ // don't do flag completion (see above)
+ flagCompletion = false
+ }
+ // Error while attempting to parse flags
+ if flagErr != nil {
+ // If error type is flagCompError and we don't want flagCompletion we should ignore the error
+ if _, ok := flagErr.(*flagCompError); !(ok && !flagCompletion) {
+ return finalCmd, []string{}, ShellCompDirectiveDefault, flagErr
+ }
+ }
+
+ // Look for the --help or --version flags. If they are present,
+ // there should be no further completions.
+ if helpOrVersionFlagPresent(finalCmd) {
+ return finalCmd, []string{}, ShellCompDirectiveNoFileComp, nil
+ }
+
+ // We only remove the flags from the arguments if DisableFlagParsing is not set.
+ // This is important for commands which have requested to do their own flag completion.
+ if !finalCmd.DisableFlagParsing {
+ finalArgs = finalCmd.Flags().Args()
+ }
+
+ if flag != nil && flagCompletion {
+ // Check if we are completing a flag value subject to annotations
+ if validExts, present := flag.Annotations[BashCompFilenameExt]; present {
+ if len(validExts) != 0 {
+ // File completion filtered by extensions
+ return finalCmd, validExts, ShellCompDirectiveFilterFileExt, nil
+ }
+
+ // The annotation requests simple file completion. There is no reason to do
+ // that since it is the default behavior anyway. Let's ignore this annotation
+ // in case the program also registered a completion function for this flag.
+ // Even though it is a mistake on the program's side, let's be nice when we can.
+ }
+
+ if subDir, present := flag.Annotations[BashCompSubdirsInDir]; present {
+ if len(subDir) == 1 {
+ // Directory completion from within a directory
+ return finalCmd, subDir, ShellCompDirectiveFilterDirs, nil
+ }
+ // Directory completion
+ return finalCmd, []string{}, ShellCompDirectiveFilterDirs, nil
+ }
+ }
+
+ var completions []string
+ var directive ShellCompDirective
+
+ // Enforce flag groups before doing flag completions
+ finalCmd.enforceFlagGroupsForCompletion()
+
+ // Note that we want to perform flagname completion even if finalCmd.DisableFlagParsing==true;
+ // doing this allows for completion of persistent flag names even for commands that disable flag parsing.
+ //
+ // When doing completion of a flag name, as soon as an argument starts with
+ // a '-' we know it is a flag. We cannot use isFlagArg() here as it requires
+ // the flag name to be complete
+ if flag == nil && len(toComplete) > 0 && toComplete[0] == '-' && !strings.Contains(toComplete, "=") && flagCompletion {
+ // First check for required flags
+ completions = completeRequireFlags(finalCmd, toComplete)
+
+ // If we have not found any required flags, only then can we show regular flags
+ if len(completions) == 0 {
+ doCompleteFlags := func(flag *pflag.Flag) {
+ if !flag.Changed ||
+ strings.Contains(flag.Value.Type(), "Slice") ||
+ strings.Contains(flag.Value.Type(), "Array") {
+ // If the flag is not already present, or if it can be specified multiple times (Array or Slice)
+ // we suggest it as a completion
+ completions = append(completions, getFlagNameCompletions(flag, toComplete)...)
+ }
+ }
+
+ // We cannot use finalCmd.Flags() because we may not have called ParseFlags() for commands
+ // that have set DisableFlagParsing; it is ParseFlags() that merges the inherited and
+ // non-inherited flags.
+ finalCmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) {
+ doCompleteFlags(flag)
+ })
+ // Try to complete non-inherited flags even if DisableFlagParsing==true.
+ // This allows programs to tell Cobra about flags for completion even
+ // if the actual parsing of flags is not done by Cobra.
+ // For instance, Helm uses this to provide flag name completion for
+ // some of its plugins.
+ finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) {
+ doCompleteFlags(flag)
+ })
+ }
+
+ directive = ShellCompDirectiveNoFileComp
+ if len(completions) == 1 && strings.HasSuffix(completions[0], "=") {
+ // If there is a single completion, the shell usually adds a space
+ // after the completion. We don't want that if the flag ends with an =
+ directive = ShellCompDirectiveNoSpace
+ }
+
+ if !finalCmd.DisableFlagParsing {
+ // If DisableFlagParsing==false, we have completed the flags as known by Cobra;
+ // we can return what we found.
+ // If DisableFlagParsing==true, Cobra may not be aware of all flags, so we
+ // let the logic continue to see if ValidArgsFunction needs to be called.
+ return finalCmd, completions, directive, nil
+ }
+ } else {
+ directive = ShellCompDirectiveDefault
+ if flag == nil {
+ foundLocalNonPersistentFlag := false
+ // If TraverseChildren is true on the root command we don't check for
+ // local flags because we can use a local flag on a parent command
+ if !finalCmd.Root().TraverseChildren {
+ // Check if there are any local, non-persistent flags on the command-line
+ localNonPersistentFlags := finalCmd.LocalNonPersistentFlags()
+ finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) {
+ if localNonPersistentFlags.Lookup(flag.Name) != nil && flag.Changed {
+ foundLocalNonPersistentFlag = true
+ }
+ })
+ }
+
+ // Complete subcommand names, including the help command
+ if len(finalArgs) == 0 && !foundLocalNonPersistentFlag {
+ // We only complete sub-commands if:
+ // - there are no arguments on the command-line and
+ // - there are no local, non-persistent flags on the command-line or TraverseChildren is true
+ for _, subCmd := range finalCmd.Commands() {
+ if subCmd.IsAvailableCommand() || subCmd == finalCmd.helpCommand {
+ if strings.HasPrefix(subCmd.Name(), toComplete) {
+ completions = append(completions, fmt.Sprintf("%s\t%s", subCmd.Name(), subCmd.Short))
+ }
+ directive = ShellCompDirectiveNoFileComp
+ }
+ }
+ }
+
+ // Complete required flags even without the '-' prefix
+ completions = append(completions, completeRequireFlags(finalCmd, toComplete)...)
+
+ // Always complete ValidArgs, even if we are completing a subcommand name.
+ // This is for commands that have both subcommands and ValidArgs.
+ if len(finalCmd.ValidArgs) > 0 {
+ if len(finalArgs) == 0 {
+ // ValidArgs are only for the first argument
+ for _, validArg := range finalCmd.ValidArgs {
+ if strings.HasPrefix(validArg, toComplete) {
+ completions = append(completions, validArg)
+ }
+ }
+ directive = ShellCompDirectiveNoFileComp
+
+ // If no completions were found within commands or ValidArgs,
+ // see if there are any ArgAliases that should be completed.
+ if len(completions) == 0 {
+ for _, argAlias := range finalCmd.ArgAliases {
+ if strings.HasPrefix(argAlias, toComplete) {
+ completions = append(completions, argAlias)
+ }
+ }
+ }
+ }
+
+ // If there are ValidArgs specified (even if they don't match), we stop completion.
+ // Only one of ValidArgs or ValidArgsFunction can be used for a single command.
+ return finalCmd, completions, directive, nil
+ }
+
+ // Let the logic continue so as to add any ValidArgsFunction completions,
+ // even if we already found sub-commands.
+ // This is for commands that have subcommands but also specify a ValidArgsFunction.
+ }
+ }
+
+ // Find the completion function for the flag or command
+ var completionFn func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective)
+ if flag != nil && flagCompletion {
+ flagCompletionMutex.RLock()
+ completionFn = flagCompletionFunctions[flag]
+ flagCompletionMutex.RUnlock()
+ } else {
+ completionFn = finalCmd.ValidArgsFunction
+ }
+ if completionFn != nil {
+ // Go custom completion defined for this flag or command.
+ // Call the registered completion function to get the completions.
+ var comps []string
+ comps, directive = completionFn(finalCmd, finalArgs, toComplete)
+ completions = append(completions, comps...)
+ }
+
+ return finalCmd, completions, directive, nil
+}
+
+func helpOrVersionFlagPresent(cmd *Command) bool {
+ if versionFlag := cmd.Flags().Lookup("version"); versionFlag != nil &&
+ len(versionFlag.Annotations[FlagSetByCobraAnnotation]) > 0 && versionFlag.Changed {
+ return true
+ }
+ if helpFlag := cmd.Flags().Lookup("help"); helpFlag != nil &&
+ len(helpFlag.Annotations[FlagSetByCobraAnnotation]) > 0 && helpFlag.Changed {
+ return true
+ }
+ return false
+}
+
+func getFlagNameCompletions(flag *pflag.Flag, toComplete string) []string {
+ if nonCompletableFlag(flag) {
+ return []string{}
+ }
+
+ var completions []string
+ flagName := "--" + flag.Name
+ if strings.HasPrefix(flagName, toComplete) {
+ // Flag without the =
+ completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage))
+
+ // Why suggest both long forms: --flag and --flag= ?
+ // This forces the user to *always* have to type either an = or a space after the flag name.
+ // Let's be nice and avoid making users have to do that.
+ // Since boolean flags and shortname flags don't show the = form, let's go that route and never show it.
+ // The = form will still work, we just won't suggest it.
+ // This also makes the list of suggested flags shorter as we avoid all the = forms.
+ //
+ // if len(flag.NoOptDefVal) == 0 {
+ // // Flag requires a value, so it can be suffixed with =
+ // flagName += "="
+ // completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage))
+ // }
+ }
+
+ flagName = "-" + flag.Shorthand
+ if len(flag.Shorthand) > 0 && strings.HasPrefix(flagName, toComplete) {
+ completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage))
+ }
+
+ return completions
+}
+
+func completeRequireFlags(finalCmd *Command, toComplete string) []string {
+ var completions []string
+
+ doCompleteRequiredFlags := func(flag *pflag.Flag) {
+ if _, present := flag.Annotations[BashCompOneRequiredFlag]; present {
+ if !flag.Changed {
+ // If the flag is not already present, we suggest it as a completion
+ completions = append(completions, getFlagNameCompletions(flag, toComplete)...)
+ }
+ }
+ }
+
+ // We cannot use finalCmd.Flags() because we may not have called ParseFlags() for commands
+ // that have set DisableFlagParsing; it is ParseFlags() that merges the inherited and
+ // non-inherited flags.
+ finalCmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) {
+ doCompleteRequiredFlags(flag)
+ })
+ finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) {
+ doCompleteRequiredFlags(flag)
+ })
+
+ return completions
+}
+
+func checkIfFlagCompletion(finalCmd *Command, args []string, lastArg string) (*pflag.Flag, []string, string, error) {
+ if finalCmd.DisableFlagParsing {
+ // We only do flag completion if we are allowed to parse flags
+ // This is important for commands which have requested to do their own flag completion.
+ return nil, args, lastArg, nil
+ }
+
+ var flagName string
+ trimmedArgs := args
+ flagWithEqual := false
+ orgLastArg := lastArg
+
+ // When doing completion of a flag name, as soon as an argument starts with
+ // a '-' we know it is a flag. We cannot use isFlagArg() here as that function
+ // requires the flag name to be complete
+ if len(lastArg) > 0 && lastArg[0] == '-' {
+ if index := strings.Index(lastArg, "="); index >= 0 {
+ // Flag with an =
+ if strings.HasPrefix(lastArg[:index], "--") {
+ // Flag has full name
+ flagName = lastArg[2:index]
+ } else {
+ // Flag is shorthand
+ // We have to get the last shorthand flag name
+ // e.g. `-asd` => d to provide the correct completion
+ // https://github.com/spf13/cobra/issues/1257
+ flagName = lastArg[index-1 : index]
+ }
+ lastArg = lastArg[index+1:]
+ flagWithEqual = true
+ } else {
+ // Normal flag completion
+ return nil, args, lastArg, nil
+ }
+ }
+
+ if len(flagName) == 0 {
+ if len(args) > 0 {
+ prevArg := args[len(args)-1]
+ if isFlagArg(prevArg) {
+ // Only consider the case where the flag does not contain an =.
+ // If the flag contains an = it means it has already been fully processed,
+ // so we don't need to deal with it here.
+ if index := strings.Index(prevArg, "="); index < 0 {
+ if strings.HasPrefix(prevArg, "--") {
+ // Flag has full name
+ flagName = prevArg[2:]
+ } else {
+ // Flag is shorthand
+ // We have to get the last shorthand flag name
+ // e.g. `-asd` => d to provide the correct completion
+ // https://github.com/spf13/cobra/issues/1257
+ flagName = prevArg[len(prevArg)-1:]
+ }
+ // Remove the uncompleted flag or else there could be an error created
+ // for an invalid value for that flag
+ trimmedArgs = args[:len(args)-1]
+ }
+ }
+ }
+ }
+
+ if len(flagName) == 0 {
+ // Not doing flag completion
+ return nil, trimmedArgs, lastArg, nil
+ }
+
+ flag := findFlag(finalCmd, flagName)
+ if flag == nil {
+ // Flag not supported by this command; the interspersed option might be set, so return the original args
+ return nil, args, orgLastArg, &flagCompError{subCommand: finalCmd.Name(), flagName: flagName}
+ }
+
+ if !flagWithEqual {
+ if len(flag.NoOptDefVal) != 0 {
+ // We had assumed we were dealing with a two-word flag, but the flag is a boolean flag.
+ // In that case, there is no value following it, so we are not really doing flag completion.
+ // Reset everything to do noun completion.
+ trimmedArgs = args
+ flag = nil
+ }
+ }
+
+ return flag, trimmedArgs, lastArg, nil
+}
+
+// InitDefaultCompletionCmd adds a default 'completion' command to c.
+// This function will do nothing if any of the following is true:
+// 1- the feature has been explicitly disabled by the program,
+// 2- c has no subcommands (to avoid creating one),
+// 3- c already has a 'completion' command provided by the program.
+func (c *Command) InitDefaultCompletionCmd() {
+ if c.CompletionOptions.DisableDefaultCmd || !c.HasSubCommands() {
+ return
+ }
+
+ for _, cmd := range c.commands {
+ if cmd.Name() == compCmdName || cmd.HasAlias(compCmdName) {
+ // A completion command is already available
+ return
+ }
+ }
+
+ haveNoDescFlag := !c.CompletionOptions.DisableNoDescFlag && !c.CompletionOptions.DisableDescriptions
+
+ completionCmd := &Command{
+ Use: compCmdName,
+ Short: "Generate the autocompletion script for the specified shell",
+ Long: fmt.Sprintf(`Generate the autocompletion script for %[1]s for the specified shell.
+See each sub-command's help for details on how to use the generated script.
+`, c.Root().Name()),
+ Args: NoArgs,
+ ValidArgsFunction: NoFileCompletions,
+ Hidden: c.CompletionOptions.HiddenDefaultCmd,
+ GroupID: c.completionCommandGroupID,
+ }
+ c.AddCommand(completionCmd)
+
+ out := c.OutOrStdout()
+ noDesc := c.CompletionOptions.DisableDescriptions
+ shortDesc := "Generate the autocompletion script for %s"
+ bash := &Command{
+ Use: "bash",
+ Short: fmt.Sprintf(shortDesc, "bash"),
+ Long: fmt.Sprintf(`Generate the autocompletion script for the bash shell.
+
+This script depends on the 'bash-completion' package.
+If it is not installed already, you can install it via your OS's package manager.
+
+To load completions in your current shell session:
+
+ source <(%[1]s completion bash)
+
+To load completions for every new session, execute once:
+
+#### Linux:
+
+ %[1]s completion bash > /etc/bash_completion.d/%[1]s
+
+#### macOS:
+
+ %[1]s completion bash > $(brew --prefix)/etc/bash_completion.d/%[1]s
+
+You will need to start a new shell for this setup to take effect.
+`, c.Root().Name()),
+ Args: NoArgs,
+ DisableFlagsInUseLine: true,
+ ValidArgsFunction: NoFileCompletions,
+ RunE: func(cmd *Command, args []string) error {
+ return cmd.Root().GenBashCompletionV2(out, !noDesc)
+ },
+ }
+ if haveNoDescFlag {
+ bash.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc)
+ }
+
+ zsh := &Command{
+ Use: "zsh",
+ Short: fmt.Sprintf(shortDesc, "zsh"),
+ Long: fmt.Sprintf(`Generate the autocompletion script for the zsh shell.
+
+If shell completion is not already enabled in your environment you will need
+to enable it. You can execute the following once:
+
+ echo "autoload -U compinit; compinit" >> ~/.zshrc
+
+To load completions in your current shell session:
+
+ source <(%[1]s completion zsh)
+
+To load completions for every new session, execute once:
+
+#### Linux:
+
+ %[1]s completion zsh > "${fpath[1]}/_%[1]s"
+
+#### macOS:
+
+ %[1]s completion zsh > $(brew --prefix)/share/zsh/site-functions/_%[1]s
+
+You will need to start a new shell for this setup to take effect.
+`, c.Root().Name()),
+ Args: NoArgs,
+ ValidArgsFunction: NoFileCompletions,
+ RunE: func(cmd *Command, args []string) error {
+ if noDesc {
+ return cmd.Root().GenZshCompletionNoDesc(out)
+ }
+ return cmd.Root().GenZshCompletion(out)
+ },
+ }
+ if haveNoDescFlag {
+ zsh.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc)
+ }
+
+ fish := &Command{
+ Use: "fish",
+ Short: fmt.Sprintf(shortDesc, "fish"),
+ Long: fmt.Sprintf(`Generate the autocompletion script for the fish shell.
+
+To load completions in your current shell session:
+
+ %[1]s completion fish | source
+
+To load completions for every new session, execute once:
+
+ %[1]s completion fish > ~/.config/fish/completions/%[1]s.fish
+
+You will need to start a new shell for this setup to take effect.
+`, c.Root().Name()),
+ Args: NoArgs,
+ ValidArgsFunction: NoFileCompletions,
+ RunE: func(cmd *Command, args []string) error {
+ return cmd.Root().GenFishCompletion(out, !noDesc)
+ },
+ }
+ if haveNoDescFlag {
+ fish.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc)
+ }
+
+ powershell := &Command{
+ Use: "powershell",
+ Short: fmt.Sprintf(shortDesc, "powershell"),
+ Long: fmt.Sprintf(`Generate the autocompletion script for powershell.
+
+To load completions in your current shell session:
+
+ %[1]s completion powershell | Out-String | Invoke-Expression
+
+To load completions for every new session, add the output of the above command
+to your powershell profile.
+`, c.Root().Name()),
+ Args: NoArgs,
+ ValidArgsFunction: NoFileCompletions,
+ RunE: func(cmd *Command, args []string) error {
+ if noDesc {
+ return cmd.Root().GenPowerShellCompletion(out)
+ }
+ return cmd.Root().GenPowerShellCompletionWithDesc(out)
+
+ },
+ }
+ if haveNoDescFlag {
+ powershell.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc)
+ }
+
+ completionCmd.AddCommand(bash, zsh, fish, powershell)
+}
+
+func findFlag(cmd *Command, name string) *pflag.Flag {
+ flagSet := cmd.Flags()
+ if len(name) == 1 {
+ // First convert the short flag into a long flag
+ // as the cmd.Flag() search only accepts long flags
+ if short := flagSet.ShorthandLookup(name); short != nil {
+ name = short.Name
+ } else {
+ set := cmd.InheritedFlags()
+ if short = set.ShorthandLookup(name); short != nil {
+ name = short.Name
+ } else {
+ return nil
+ }
+ }
+ }
+ return cmd.Flag(name)
+}
+
+// CompDebug prints the specified string to the same file as where the
+// completion script prints its logs.
+// Note that completion printouts should never be on stdout as they would
+// be wrongly interpreted as actual completion choices by the completion script.
+func CompDebug(msg string, printToStdErr bool) {
+ msg = fmt.Sprintf("[Debug] %s", msg)
+
+ // Such logs are only printed when the user has set the environment
+ // variable BASH_COMP_DEBUG_FILE to the path of some file to be used.
+ if path := os.Getenv("BASH_COMP_DEBUG_FILE"); path != "" {
+ f, err := os.OpenFile(path,
+ os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
+ if err == nil {
+ defer f.Close()
+ WriteStringAndCheck(f, msg)
+ }
+ }
+
+ if printToStdErr {
+ // Must print to stderr for this not to be read by the completion script.
+ fmt.Fprint(os.Stderr, msg)
+ }
+}
+
+// CompDebugln prints the specified string with a newline at the end
+// to the same file as where the completion script prints its logs.
+// Such logs are only printed when the user has set the environment
+// variable BASH_COMP_DEBUG_FILE to the path of some file to be used.
+func CompDebugln(msg string, printToStdErr bool) {
+ CompDebug(fmt.Sprintf("%s\n", msg), printToStdErr)
+}
+
+// CompError prints the specified completion message to stderr.
+func CompError(msg string) {
+ msg = fmt.Sprintf("[Error] %s", msg)
+ CompDebug(msg, true)
+}
+
+// CompErrorln prints the specified completion message to stderr with a newline at the end.
+func CompErrorln(msg string) {
+ CompError(fmt.Sprintf("%s\n", msg))
+}
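+
+// Illustrative: completion code (for example a ValidArgsFunction) can emit its
+// own trace lines through these helpers; they land in the file named by
+// BASH_COMP_DEBUG_FILE when that variable is set:
+//
+//	cobra.CompDebugln("entered ValidArgsFunction", false)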
+
+// These values should not be changed: users will be using them explicitly.
+const (
+ configEnvVarGlobalPrefix = "COBRA"
+ configEnvVarSuffixDescriptions = "COMPLETION_DESCRIPTIONS"
+)
+
+var configEnvVarPrefixSubstRegexp = regexp.MustCompile(`[^A-Z0-9_]`)
+
+// configEnvVar returns the name of the program-specific configuration environment
+// variable. It has the format <PROGRAM>_<SUFFIX> where <PROGRAM> is the name of the
+// root command in upper case, with all non-ASCII-alphanumeric characters replaced by `_`.
+func configEnvVar(name, suffix string) string {
+ // This format should not be changed: users will be using it explicitly.
+ v := strings.ToUpper(fmt.Sprintf("%s_%s", name, suffix))
+ v = configEnvVarPrefixSubstRegexp.ReplaceAllString(v, "_")
+ return v
+}
+
+// getEnvConfig returns the value of the configuration environment variable
+// <PROGRAM>_<SUFFIX> where <PROGRAM> is the name of the root command in upper
+// case, with all non-ASCII-alphanumeric characters replaced by `_`.
+// If the value is empty or not set, the value of the environment variable
+// COBRA_<SUFFIX> is returned instead.
+func getEnvConfig(cmd *Command, suffix string) string {
+ v := os.Getenv(configEnvVar(cmd.Root().Name(), suffix))
+ if v == "" {
+ v = os.Getenv(configEnvVar(configEnvVarGlobalPrefix, suffix))
+ }
+ return v
+}
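+
+// Illustrative example (hypothetical program name): for a root command named
+// "my-app" the lookup below yields "MY_APP_COMPLETION_DESCRIPTIONS", with
+// COBRA_COMPLETION_DESCRIPTIONS as the global fallback consulted by getEnvConfig.
+//
+//	configEnvVar("my-app", configEnvVarSuffixDescriptions)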
diff --git a/vendor/github.com/spf13/cobra/fish_completions.go b/vendor/github.com/spf13/cobra/fish_completions.go
new file mode 100644
index 000000000..12d61b691
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/fish_completions.go
@@ -0,0 +1,292 @@
+// Copyright 2013-2023 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cobra
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+)
+
+func genFishComp(buf io.StringWriter, name string, includeDesc bool) {
+ // Variables should not contain a '-' or ':' character
+ nameForVar := name
+ nameForVar = strings.ReplaceAll(nameForVar, "-", "_")
+ nameForVar = strings.ReplaceAll(nameForVar, ":", "_")
+
+ compCmd := ShellCompRequestCmd
+ if !includeDesc {
+ compCmd = ShellCompNoDescRequestCmd
+ }
+ WriteStringAndCheck(buf, fmt.Sprintf("# fish completion for %-36s -*- shell-script -*-\n", name))
+ WriteStringAndCheck(buf, fmt.Sprintf(`
+function __%[1]s_debug
+ set -l file "$BASH_COMP_DEBUG_FILE"
+ if test -n "$file"
+ echo "$argv" >> $file
+ end
+end
+
+function __%[1]s_perform_completion
+ __%[1]s_debug "Starting __%[1]s_perform_completion"
+
+ # Extract all args except the last one
+ set -l args (commandline -opc)
+ # Extract the last arg and escape it in case it is a space
+ set -l lastArg (string escape -- (commandline -ct))
+
+ __%[1]s_debug "args: $args"
+ __%[1]s_debug "last arg: $lastArg"
+
+ # Disable ActiveHelp which is not supported for fish shell
+ set -l requestComp "%[10]s=0 $args[1] %[3]s $args[2..-1] $lastArg"
+
+ __%[1]s_debug "Calling $requestComp"
+ set -l results (eval $requestComp 2> /dev/null)
+
+ # Some programs may output extra empty lines after the directive.
+ # Let's ignore them or else it will break completion.
+ # Ref: https://github.com/spf13/cobra/issues/1279
+ for line in $results[-1..1]
+ if test (string trim -- $line) = ""
+ # Found an empty line, remove it
+ set results $results[1..-2]
+ else
+ # Found non-empty line, we have our proper output
+ break
+ end
+ end
+
+ set -l comps $results[1..-2]
+ set -l directiveLine $results[-1]
+
+ # For Fish, when completing a flag with an = (e.g., -n=)
+ # completions must be prefixed with the flag
+ set -l flagPrefix (string match -r -- '-.*=' "$lastArg")
+
+ __%[1]s_debug "Comps: $comps"
+ __%[1]s_debug "DirectiveLine: $directiveLine"
+ __%[1]s_debug "flagPrefix: $flagPrefix"
+
+ for comp in $comps
+ printf "%%s%%s\n" "$flagPrefix" "$comp"
+ end
+
+ printf "%%s\n" "$directiveLine"
+end
+
+# this function limits calls to __%[1]s_perform_completion by caching the result in $__%[1]s_perform_completion_once_result
+function __%[1]s_perform_completion_once
+ __%[1]s_debug "Starting __%[1]s_perform_completion_once"
+
+ if test -n "$__%[1]s_perform_completion_once_result"
+ __%[1]s_debug "Seems like a valid result already exists, skipping __%[1]s_perform_completion"
+ return 0
+ end
+
+ set --global __%[1]s_perform_completion_once_result (__%[1]s_perform_completion)
+ if test -z "$__%[1]s_perform_completion_once_result"
+ __%[1]s_debug "No completions, probably due to a failure"
+ return 1
+ end
+
+ __%[1]s_debug "Performed completions and set __%[1]s_perform_completion_once_result"
+ return 0
+end
+
+# this function is used to clear the $__%[1]s_perform_completion_once_result variable after completions are run
+function __%[1]s_clear_perform_completion_once_result
+ __%[1]s_debug ""
+ __%[1]s_debug "========= clearing previously set __%[1]s_perform_completion_once_result variable =========="
+ set --erase __%[1]s_perform_completion_once_result
+ __%[1]s_debug "Successfully erased the variable __%[1]s_perform_completion_once_result"
+end
+
+function __%[1]s_requires_order_preservation
+ __%[1]s_debug ""
+ __%[1]s_debug "========= checking if order preservation is required =========="
+
+ __%[1]s_perform_completion_once
+ if test -z "$__%[1]s_perform_completion_once_result"
+ __%[1]s_debug "Error determining if order preservation is required"
+ return 1
+ end
+
+ set -l directive (string sub --start 2 $__%[1]s_perform_completion_once_result[-1])
+ __%[1]s_debug "Directive is: $directive"
+
+ set -l shellCompDirectiveKeepOrder %[9]d
+ set -l keeporder (math (math --scale 0 $directive / $shellCompDirectiveKeepOrder) %% 2)
+ __%[1]s_debug "Keeporder is: $keeporder"
+
+ if test $keeporder -ne 0
+ __%[1]s_debug "This does require order preservation"
+ return 0
+ end
+
+ __%[1]s_debug "This doesn't require order preservation"
+ return 1
+end
+
+
+# This function does two things:
+# - Obtain the completions and store them in the global __%[1]s_comp_results
+# - Return false if file completion should be performed
+function __%[1]s_prepare_completions
+ __%[1]s_debug ""
+ __%[1]s_debug "========= starting completion logic =========="
+
+ # Start fresh
+ set --erase __%[1]s_comp_results
+
+ __%[1]s_perform_completion_once
+ __%[1]s_debug "Completion results: $__%[1]s_perform_completion_once_result"
+
+ if test -z "$__%[1]s_perform_completion_once_result"
+ __%[1]s_debug "No completion, probably due to a failure"
+ # Might as well do file completion, in case it helps
+ return 1
+ end
+
+ set -l directive (string sub --start 2 $__%[1]s_perform_completion_once_result[-1])
+ set --global __%[1]s_comp_results $__%[1]s_perform_completion_once_result[1..-2]
+
+ __%[1]s_debug "Completions are: $__%[1]s_comp_results"
+ __%[1]s_debug "Directive is: $directive"
+
+ set -l shellCompDirectiveError %[4]d
+ set -l shellCompDirectiveNoSpace %[5]d
+ set -l shellCompDirectiveNoFileComp %[6]d
+ set -l shellCompDirectiveFilterFileExt %[7]d
+ set -l shellCompDirectiveFilterDirs %[8]d
+
+ if test -z "$directive"
+ set directive 0
+ end
+
+ set -l compErr (math (math --scale 0 $directive / $shellCompDirectiveError) %% 2)
+ if test $compErr -eq 1
+ __%[1]s_debug "Received error directive: aborting."
+ # Might as well do file completion, in case it helps
+ return 1
+ end
+
+ set -l filefilter (math (math --scale 0 $directive / $shellCompDirectiveFilterFileExt) %% 2)
+ set -l dirfilter (math (math --scale 0 $directive / $shellCompDirectiveFilterDirs) %% 2)
+ if test $filefilter -eq 1; or test $dirfilter -eq 1
+ __%[1]s_debug "File extension filtering or directory filtering not supported"
+ # Do full file completion instead
+ return 1
+ end
+
+ set -l nospace (math (math --scale 0 $directive / $shellCompDirectiveNoSpace) %% 2)
+ set -l nofiles (math (math --scale 0 $directive / $shellCompDirectiveNoFileComp) %% 2)
+
+ __%[1]s_debug "nospace: $nospace, nofiles: $nofiles"
+
+ # If we want to prevent a space, or if file completion is NOT disabled,
+ # we need to count the number of valid completions.
+ # To do so, we will filter on prefix as the completions we have received
+ # may not already be filtered so as to allow fish to match on different
+ # criteria than the prefix.
+ if test $nospace -ne 0; or test $nofiles -eq 0
+ set -l prefix (commandline -t | string escape --style=regex)
+ __%[1]s_debug "prefix: $prefix"
+
+ set -l completions (string match -r -- "^$prefix.*" $__%[1]s_comp_results)
+ set --global __%[1]s_comp_results $completions
+ __%[1]s_debug "Filtered completions are: $__%[1]s_comp_results"
+
+ # Important not to quote the variable for count to work
+ set -l numComps (count $__%[1]s_comp_results)
+ __%[1]s_debug "numComps: $numComps"
+
+ if test $numComps -eq 1; and test $nospace -ne 0
+ # We must first split on \t to get rid of the descriptions to be
+ # able to check what the actual completion will be.
+ # We don't need descriptions anyway since there is only a single
+ # real completion which the shell will expand immediately.
+ set -l split (string split --max 1 \t $__%[1]s_comp_results[1])
+
+ # Fish won't add a space if the completion ends with any
+ # of the following characters: @=/:.,
+ set -l lastChar (string sub -s -1 -- $split)
+ if not string match -r -q "[@=/:.,]" -- "$lastChar"
+ # In other cases, to support the "nospace" directive we trick the shell
+ # by outputting an extra, longer completion.
+ __%[1]s_debug "Adding second completion to perform nospace directive"
+ set --global __%[1]s_comp_results $split[1] $split[1].
+ __%[1]s_debug "Completions are now: $__%[1]s_comp_results"
+ end
+ end
+
+ if test $numComps -eq 0; and test $nofiles -eq 0
+ # To be consistent with bash and zsh, we only trigger file
+ # completion when there are no other completions
+ __%[1]s_debug "Requesting file completion"
+ return 1
+ end
+ end
+
+ return 0
+end
+
+# Since Fish completions are only loaded once the user triggers them, we trigger them ourselves
+# so we can properly delete any completions provided by another script.
+# Only do this if the program can be found, or else fish may print some errors; besides,
+# the existing completions will only be loaded if the program can be found.
+if type -q "%[2]s"
+ # The space after the program name is essential to trigger completion for the program
+ # and not completion of the program name itself.
+ # Also, we use '> /dev/null 2>&1' since '&>' is not supported in older versions of fish.
+ complete --do-complete "%[2]s " > /dev/null 2>&1
+end
+
+# Remove any pre-existing completions for the program since we will be handling all of them.
+complete -c %[2]s -e
+
+# this will get called after the two calls below and clear the $__%[1]s_perform_completion_once_result global
+complete -c %[2]s -n '__%[1]s_clear_perform_completion_once_result'
+# The call to __%[1]s_prepare_completions will setup __%[1]s_comp_results
+# which provides the program's completion choices.
+# If this doesn't require order preservation, we don't use the -k flag
+complete -c %[2]s -n 'not __%[1]s_requires_order_preservation && __%[1]s_prepare_completions' -f -a '$__%[1]s_comp_results'
+# otherwise we use the -k flag
+complete -k -c %[2]s -n '__%[1]s_requires_order_preservation && __%[1]s_prepare_completions' -f -a '$__%[1]s_comp_results'
+`, nameForVar, name, compCmd,
+ ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp,
+ ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, ShellCompDirectiveKeepOrder, activeHelpEnvVar(name)))
+}
+
+// GenFishCompletion generates a fish completion file and writes it to the passed writer.
+func (c *Command) GenFishCompletion(w io.Writer, includeDesc bool) error {
+ buf := new(bytes.Buffer)
+ genFishComp(buf, c.Name(), includeDesc)
+ _, err := buf.WriteTo(w)
+ return err
+}
+
+// GenFishCompletionFile generates a fish completion file.
+func (c *Command) GenFishCompletionFile(filename string, includeDesc bool) error {
+ outFile, err := os.Create(filename)
+ if err != nil {
+ return err
+ }
+ defer outFile.Close()
+
+ return c.GenFishCompletion(outFile, includeDesc)
+}
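
A minimal sketch of how a program built on this vendored cobra copy would emit the fish script (the `demo` name is a placeholder):

```go
package main

import (
	"os"

	"github.com/spf13/cobra"
)

func main() {
	root := &cobra.Command{Use: "demo"}
	// Write the fish completion script (with descriptions) to stdout.
	// A user would typically install it as ~/.config/fish/completions/demo.fish.
	if err := root.GenFishCompletion(os.Stdout, true); err != nil {
		panic(err)
	}
}
```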
diff --git a/vendor/github.com/spf13/cobra/flag_groups.go b/vendor/github.com/spf13/cobra/flag_groups.go
new file mode 100644
index 000000000..560612fd3
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/flag_groups.go
@@ -0,0 +1,290 @@
+// Copyright 2013-2023 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cobra
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+
+ flag "github.com/spf13/pflag"
+)
+
+const (
+ requiredAsGroupAnnotation = "cobra_annotation_required_if_others_set"
+ oneRequiredAnnotation = "cobra_annotation_one_required"
+ mutuallyExclusiveAnnotation = "cobra_annotation_mutually_exclusive"
+)
+
+// MarkFlagsRequiredTogether marks the given flags with annotations so that Cobra errors
+// if the command is invoked with a subset (but not all) of the given flags.
+func (c *Command) MarkFlagsRequiredTogether(flagNames ...string) {
+ c.mergePersistentFlags()
+ for _, v := range flagNames {
+ f := c.Flags().Lookup(v)
+ if f == nil {
+ panic(fmt.Sprintf("Failed to find flag %q and mark it as being required in a flag group", v))
+ }
+ if err := c.Flags().SetAnnotation(v, requiredAsGroupAnnotation, append(f.Annotations[requiredAsGroupAnnotation], strings.Join(flagNames, " "))); err != nil {
+ // Only errs if the flag isn't found.
+ panic(err)
+ }
+ }
+}
+
+// MarkFlagsOneRequired marks the given flags with annotations so that Cobra errors
+// if the command is invoked without at least one flag from the given set of flags.
+func (c *Command) MarkFlagsOneRequired(flagNames ...string) {
+ c.mergePersistentFlags()
+ for _, v := range flagNames {
+ f := c.Flags().Lookup(v)
+ if f == nil {
+ panic(fmt.Sprintf("Failed to find flag %q and mark it as being in a one-required flag group", v))
+ }
+ if err := c.Flags().SetAnnotation(v, oneRequiredAnnotation, append(f.Annotations[oneRequiredAnnotation], strings.Join(flagNames, " "))); err != nil {
+ // Only errs if the flag isn't found.
+ panic(err)
+ }
+ }
+}
+
+// MarkFlagsMutuallyExclusive marks the given flags with annotations so that Cobra errors
+// if the command is invoked with more than one flag from the given set of flags.
+func (c *Command) MarkFlagsMutuallyExclusive(flagNames ...string) {
+ c.mergePersistentFlags()
+ for _, v := range flagNames {
+ f := c.Flags().Lookup(v)
+ if f == nil {
+ panic(fmt.Sprintf("Failed to find flag %q and mark it as being in a mutually exclusive flag group", v))
+ }
+ // Each time this is called is a single new entry; this allows it to be a member of multiple groups if needed.
+ if err := c.Flags().SetAnnotation(v, mutuallyExclusiveAnnotation, append(f.Annotations[mutuallyExclusiveAnnotation], strings.Join(flagNames, " "))); err != nil {
+ panic(err)
+ }
+ }
+}
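
A short sketch of the three markers in use (command and flag names are invented):

```go
package main

import "github.com/spf13/cobra"

func main() {
	cmd := &cobra.Command{
		Use:  "upload",
		RunE: func(cmd *cobra.Command, args []string) error { return nil },
	}
	cmd.Flags().String("username", "", "user name")
	cmd.Flags().String("password", "", "password")
	cmd.Flags().Bool("json", false, "JSON output")
	cmd.Flags().Bool("yaml", false, "YAML output")

	// --username and --password must be given together,
	// and --json/--yaml cannot be combined.
	cmd.MarkFlagsRequiredTogether("username", "password")
	cmd.MarkFlagsMutuallyExclusive("json", "yaml")

	_ = cmd.Execute()
}
```

With this setup, invoking `upload --json --yaml` fails with the "if any flags in the group [json yaml] are set none of the others can be" error produced by the validation functions below.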
+
+// ValidateFlagGroups validates the mutuallyExclusive/oneRequired/requiredAsGroup logic and returns the
+// first error encountered.
+func (c *Command) ValidateFlagGroups() error {
+ if c.DisableFlagParsing {
+ return nil
+ }
+
+ flags := c.Flags()
+
+	// groupStatus is keyed by the group's flag list (which serves as a unique ID)
+	// and maps each flag name in the group to whether it has been set.
+ groupStatus := map[string]map[string]bool{}
+ oneRequiredGroupStatus := map[string]map[string]bool{}
+ mutuallyExclusiveGroupStatus := map[string]map[string]bool{}
+ flags.VisitAll(func(pflag *flag.Flag) {
+ processFlagForGroupAnnotation(flags, pflag, requiredAsGroupAnnotation, groupStatus)
+ processFlagForGroupAnnotation(flags, pflag, oneRequiredAnnotation, oneRequiredGroupStatus)
+ processFlagForGroupAnnotation(flags, pflag, mutuallyExclusiveAnnotation, mutuallyExclusiveGroupStatus)
+ })
+
+ if err := validateRequiredFlagGroups(groupStatus); err != nil {
+ return err
+ }
+ if err := validateOneRequiredFlagGroups(oneRequiredGroupStatus); err != nil {
+ return err
+ }
+ if err := validateExclusiveFlagGroups(mutuallyExclusiveGroupStatus); err != nil {
+ return err
+ }
+ return nil
+}
+
+func hasAllFlags(fs *flag.FlagSet, flagnames ...string) bool {
+ for _, fname := range flagnames {
+ f := fs.Lookup(fname)
+ if f == nil {
+ return false
+ }
+ }
+ return true
+}
+
+func processFlagForGroupAnnotation(flags *flag.FlagSet, pflag *flag.Flag, annotation string, groupStatus map[string]map[string]bool) {
+ groupInfo, found := pflag.Annotations[annotation]
+ if found {
+ for _, group := range groupInfo {
+ if groupStatus[group] == nil {
+ flagnames := strings.Split(group, " ")
+
+ // Only consider this flag group at all if all the flags are defined.
+ if !hasAllFlags(flags, flagnames...) {
+ continue
+ }
+
+ groupStatus[group] = make(map[string]bool, len(flagnames))
+ for _, name := range flagnames {
+ groupStatus[group][name] = false
+ }
+ }
+
+ groupStatus[group][pflag.Name] = pflag.Changed
+ }
+ }
+}
+
+func validateRequiredFlagGroups(data map[string]map[string]bool) error {
+ keys := sortedKeys(data)
+ for _, flagList := range keys {
+ flagnameAndStatus := data[flagList]
+
+ unset := []string{}
+ for flagname, isSet := range flagnameAndStatus {
+ if !isSet {
+ unset = append(unset, flagname)
+ }
+ }
+ if len(unset) == len(flagnameAndStatus) || len(unset) == 0 {
+ continue
+ }
+
+ // Sort values, so they can be tested/scripted against consistently.
+ sort.Strings(unset)
+ return fmt.Errorf("if any flags in the group [%v] are set they must all be set; missing %v", flagList, unset)
+ }
+
+ return nil
+}
+
+func validateOneRequiredFlagGroups(data map[string]map[string]bool) error {
+ keys := sortedKeys(data)
+ for _, flagList := range keys {
+ flagnameAndStatus := data[flagList]
+ var set []string
+ for flagname, isSet := range flagnameAndStatus {
+ if isSet {
+ set = append(set, flagname)
+ }
+ }
+ if len(set) >= 1 {
+ continue
+ }
+
+ // Sort values, so they can be tested/scripted against consistently.
+ sort.Strings(set)
+ return fmt.Errorf("at least one of the flags in the group [%v] is required", flagList)
+ }
+ return nil
+}
+
+func validateExclusiveFlagGroups(data map[string]map[string]bool) error {
+ keys := sortedKeys(data)
+ for _, flagList := range keys {
+ flagnameAndStatus := data[flagList]
+ var set []string
+ for flagname, isSet := range flagnameAndStatus {
+ if isSet {
+ set = append(set, flagname)
+ }
+ }
+ if len(set) == 0 || len(set) == 1 {
+ continue
+ }
+
+ // Sort values, so they can be tested/scripted against consistently.
+ sort.Strings(set)
+ return fmt.Errorf("if any flags in the group [%v] are set none of the others can be; %v were all set", flagList, set)
+ }
+ return nil
+}
+
+func sortedKeys(m map[string]map[string]bool) []string {
+ keys := make([]string, len(m))
+ i := 0
+ for k := range m {
+ keys[i] = k
+ i++
+ }
+ sort.Strings(keys)
+ return keys
+}
+
+// enforceFlagGroupsForCompletion will do the following:
+// - when a flag in a group is present, other flags in the group will be marked required
+// - when none of the flags in a one-required group are present, all flags in the group will be marked required
+// - when a flag in a mutually exclusive group is present, other flags in the group will be marked as hidden
+// This allows the standard completion logic to behave appropriately for flag groups
+func (c *Command) enforceFlagGroupsForCompletion() {
+ if c.DisableFlagParsing {
+ return
+ }
+
+ flags := c.Flags()
+ groupStatus := map[string]map[string]bool{}
+ oneRequiredGroupStatus := map[string]map[string]bool{}
+ mutuallyExclusiveGroupStatus := map[string]map[string]bool{}
+ c.Flags().VisitAll(func(pflag *flag.Flag) {
+ processFlagForGroupAnnotation(flags, pflag, requiredAsGroupAnnotation, groupStatus)
+ processFlagForGroupAnnotation(flags, pflag, oneRequiredAnnotation, oneRequiredGroupStatus)
+ processFlagForGroupAnnotation(flags, pflag, mutuallyExclusiveAnnotation, mutuallyExclusiveGroupStatus)
+ })
+
+ // If a flag that is part of a group is present, we make all the other flags
+ // of that group required so that the shell completion suggests them automatically
+ for flagList, flagnameAndStatus := range groupStatus {
+ for _, isSet := range flagnameAndStatus {
+ if isSet {
+ // One of the flags of the group is set, mark the other ones as required
+ for _, fName := range strings.Split(flagList, " ") {
+ _ = c.MarkFlagRequired(fName)
+ }
+ }
+ }
+ }
+
+ // If none of the flags of a one-required group are present, we make all the flags
+ // of that group required so that the shell completion suggests them automatically
+ for flagList, flagnameAndStatus := range oneRequiredGroupStatus {
+ isSet := false
+
+ for _, isSet = range flagnameAndStatus {
+ if isSet {
+ break
+ }
+ }
+
+ // None of the flags of the group are set, mark all flags in the group
+ // as required
+ if !isSet {
+ for _, fName := range strings.Split(flagList, " ") {
+ _ = c.MarkFlagRequired(fName)
+ }
+ }
+ }
+
+ // If a flag that is mutually exclusive to others is present, we hide the other
+ // flags of that group so the shell completion does not suggest them
+ for flagList, flagnameAndStatus := range mutuallyExclusiveGroupStatus {
+ for flagName, isSet := range flagnameAndStatus {
+ if isSet {
+ // One of the flags of the mutually exclusive group is set, mark the other ones as hidden
+ // Don't mark the flag that is already set as hidden because it may be an
+ // array or slice flag and therefore must continue being suggested
+ for _, fName := range strings.Split(flagList, " ") {
+ if fName != flagName {
+ flag := c.Flags().Lookup(fName)
+ flag.Hidden = true
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/spf13/cobra/powershell_completions.go b/vendor/github.com/spf13/cobra/powershell_completions.go
new file mode 100644
index 000000000..a830b7bca
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/powershell_completions.go
@@ -0,0 +1,325 @@
+// Copyright 2013-2023 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// The generated scripts require PowerShell v5.0+ (which comes with Windows 10, but
+// can be downloaded separately for Windows 7 or 8.1).
+
+package cobra
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+)
+
+func genPowerShellComp(buf io.StringWriter, name string, includeDesc bool) {
+ // Variables should not contain a '-' or ':' character
+ nameForVar := name
+ nameForVar = strings.ReplaceAll(nameForVar, "-", "_")
+ nameForVar = strings.ReplaceAll(nameForVar, ":", "_")
+
+ compCmd := ShellCompRequestCmd
+ if !includeDesc {
+ compCmd = ShellCompNoDescRequestCmd
+ }
+ WriteStringAndCheck(buf, fmt.Sprintf(`# powershell completion for %-36[1]s -*- shell-script -*-
+
+function __%[1]s_debug {
+ if ($env:BASH_COMP_DEBUG_FILE) {
+ "$args" | Out-File -Append -FilePath "$env:BASH_COMP_DEBUG_FILE"
+ }
+}
+
+filter __%[1]s_escapeStringWithSpecialChars {
+`+" $_ -replace '\\s|#|@|\\$|;|,|''|\\{|\\}|\\(|\\)|\"|`|\\||<|>|&','`$&'"+`
+}
+
+[scriptblock]${__%[2]sCompleterBlock} = {
+ param(
+ $WordToComplete,
+ $CommandAst,
+ $CursorPosition
+ )
+
+ # Get the current command line and convert into a string
+ $Command = $CommandAst.CommandElements
+ $Command = "$Command"
+
+ __%[1]s_debug ""
+ __%[1]s_debug "========= starting completion logic =========="
+ __%[1]s_debug "WordToComplete: $WordToComplete Command: $Command CursorPosition: $CursorPosition"
+
+ # The user could have moved the cursor backwards on the command-line.
+ # We need to trigger completion from the $CursorPosition location, so we need
+ # to truncate the command-line ($Command) up to the $CursorPosition location.
+    # Make sure the $Command is longer than the $CursorPosition before we truncate.
+ # This happens because the $Command does not include the last space.
+ if ($Command.Length -gt $CursorPosition) {
+ $Command=$Command.Substring(0,$CursorPosition)
+ }
+ __%[1]s_debug "Truncated command: $Command"
+
+ $ShellCompDirectiveError=%[4]d
+ $ShellCompDirectiveNoSpace=%[5]d
+ $ShellCompDirectiveNoFileComp=%[6]d
+ $ShellCompDirectiveFilterFileExt=%[7]d
+ $ShellCompDirectiveFilterDirs=%[8]d
+ $ShellCompDirectiveKeepOrder=%[9]d
+
+ # Prepare the command to request completions for the program.
+ # Split the command at the first space to separate the program and arguments.
+ $Program,$Arguments = $Command.Split(" ",2)
+
+ $RequestComp="$Program %[3]s $Arguments"
+ __%[1]s_debug "RequestComp: $RequestComp"
+
+ # we cannot use $WordToComplete because it
+ # has the wrong values if the cursor was moved
+ # so use the last argument
+ if ($WordToComplete -ne "" ) {
+ $WordToComplete = $Arguments.Split(" ")[-1]
+ }
+ __%[1]s_debug "New WordToComplete: $WordToComplete"
+
+
+ # Check for flag with equal sign
+ $IsEqualFlag = ($WordToComplete -Like "--*=*" )
+ if ( $IsEqualFlag ) {
+ __%[1]s_debug "Completing equal sign flag"
+ # Remove the flag part
+ $Flag,$WordToComplete = $WordToComplete.Split("=",2)
+ }
+
+ if ( $WordToComplete -eq "" -And ( -Not $IsEqualFlag )) {
+ # If the last parameter is complete (there is a space following it)
+ # We add an extra empty parameter so we can indicate this to the go method.
+ __%[1]s_debug "Adding extra empty parameter"
+        # PowerShell 7.2+ changed the way arguments are passed to executables,
+ # so for pre-7.2 or when Legacy argument passing is enabled we need to use
+`+" # `\"`\" to pass an empty argument, a \"\" or '' does not work!!!"+`
+ if ($PSVersionTable.PsVersion -lt [version]'7.2.0' -or
+ ($PSVersionTable.PsVersion -lt [version]'7.3.0' -and -not [ExperimentalFeature]::IsEnabled("PSNativeCommandArgumentPassing")) -or
+ (($PSVersionTable.PsVersion -ge [version]'7.3.0' -or [ExperimentalFeature]::IsEnabled("PSNativeCommandArgumentPassing")) -and
+ $PSNativeCommandArgumentPassing -eq 'Legacy')) {
+`+" $RequestComp=\"$RequestComp\" + ' `\"`\"'"+`
+ } else {
+ $RequestComp="$RequestComp" + ' ""'
+ }
+ }
+
+ __%[1]s_debug "Calling $RequestComp"
+ # First disable ActiveHelp which is not supported for Powershell
+ ${env:%[10]s}=0
+
+    # Call the command, store the output in $Out, and redirect stderr and stdout to null.
+    # $Out is an array containing one line per element.
+ Invoke-Expression -OutVariable out "$RequestComp" 2>&1 | Out-Null
+
+ # get directive from last line
+ [int]$Directive = $Out[-1].TrimStart(':')
+ if ($Directive -eq "") {
+ # There is no directive specified
+ $Directive = 0
+ }
+ __%[1]s_debug "The completion directive is: $Directive"
+
+ # remove directive (last element) from out
+ $Out = $Out | Where-Object { $_ -ne $Out[-1] }
+ __%[1]s_debug "The completions are: $Out"
+
+ if (($Directive -band $ShellCompDirectiveError) -ne 0 ) {
+ # Error code. No completion.
+ __%[1]s_debug "Received error from custom completion go code"
+ return
+ }
+
+ $Longest = 0
+ [Array]$Values = $Out | ForEach-Object {
+ #Split the output in name and description
+`+" $Name, $Description = $_.Split(\"`t\",2)"+`
+ __%[1]s_debug "Name: $Name Description: $Description"
+
+ # Look for the longest completion so that we can format things nicely
+ if ($Longest -lt $Name.Length) {
+ $Longest = $Name.Length
+ }
+
+ # Set the description to a one space string if there is none set.
+ # This is needed because the CompletionResult does not accept an empty string as argument
+ if (-Not $Description) {
+ $Description = " "
+ }
+ @{Name="$Name";Description="$Description"}
+ }
+
+
+ $Space = " "
+ if (($Directive -band $ShellCompDirectiveNoSpace) -ne 0 ) {
+ # remove the space here
+ __%[1]s_debug "ShellCompDirectiveNoSpace is called"
+ $Space = ""
+ }
+
+ if ((($Directive -band $ShellCompDirectiveFilterFileExt) -ne 0 ) -or
+ (($Directive -band $ShellCompDirectiveFilterDirs) -ne 0 )) {
+ __%[1]s_debug "ShellCompDirectiveFilterFileExt ShellCompDirectiveFilterDirs are not supported"
+
+ # return here to prevent the completion of the extensions
+ return
+ }
+
+ $Values = $Values | Where-Object {
+ # filter the result
+ $_.Name -like "$WordToComplete*"
+
+ # Join the flag back if we have an equal sign flag
+ if ( $IsEqualFlag ) {
+ __%[1]s_debug "Join the equal sign flag back to the completion value"
+ $_.Name = $Flag + "=" + $_.Name
+ }
+ }
+
+ # we sort the values in ascending order by name if keep order isn't passed
+ if (($Directive -band $ShellCompDirectiveKeepOrder) -eq 0 ) {
+ $Values = $Values | Sort-Object -Property Name
+ }
+
+ if (($Directive -band $ShellCompDirectiveNoFileComp) -ne 0 ) {
+ __%[1]s_debug "ShellCompDirectiveNoFileComp is called"
+
+ if ($Values.Length -eq 0) {
+ # Just print an empty string here so the
+ # shell does not start to complete paths.
+ # We cannot use CompletionResult here because
+ # it does not accept an empty string as argument.
+ ""
+ return
+ }
+ }
+
+ # Get the current mode
+ $Mode = (Get-PSReadLineKeyHandler | Where-Object {$_.Key -eq "Tab" }).Function
+ __%[1]s_debug "Mode: $Mode"
+
+ $Values | ForEach-Object {
+
+        # store temporarily because switch will overwrite $_
+ $comp = $_
+
+ # PowerShell supports three different completion modes
+ # - TabCompleteNext (default windows style - on each key press the next option is displayed)
+ # - Complete (works like bash)
+ # - MenuComplete (works like zsh)
+ # You set the mode with Set-PSReadLineKeyHandler -Key Tab -Function
+
+ # CompletionResult Arguments:
+ # 1) CompletionText text to be used as the auto completion result
+ # 2) ListItemText text to be displayed in the suggestion list
+ # 3) ResultType type of completion result
+ # 4) ToolTip text for the tooltip with details about the object
+
+ switch ($Mode) {
+
+ # bash like
+ "Complete" {
+
+ if ($Values.Length -eq 1) {
+ __%[1]s_debug "Only one completion left"
+
+ # insert space after value
+ [System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars) + $Space, "$($comp.Name)", 'ParameterValue', "$($comp.Description)")
+
+ } else {
+ # Add the proper number of spaces to align the descriptions
+ while($comp.Name.Length -lt $Longest) {
+ $comp.Name = $comp.Name + " "
+ }
+
+ # Check for empty description and only add parentheses if needed
+ if ($($comp.Description) -eq " " ) {
+ $Description = ""
+ } else {
+ $Description = " ($($comp.Description))"
+ }
+
+ [System.Management.Automation.CompletionResult]::new("$($comp.Name)$Description", "$($comp.Name)$Description", 'ParameterValue', "$($comp.Description)")
+ }
+ }
+
+ # zsh like
+ "MenuComplete" {
+ # insert space after value
+ # MenuComplete will automatically show the ToolTip of
+ # the highlighted value at the bottom of the suggestions.
+ [System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars) + $Space, "$($comp.Name)", 'ParameterValue', "$($comp.Description)")
+ }
+
+ # TabCompleteNext and in case we get something unknown
+ Default {
+                # Like MenuComplete but we don't want to add a space here because
+                # the user needs to press space anyway to get the completion.
+                # Descriptions will not be shown because that's not possible with TabCompleteNext
+ [System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars), "$($comp.Name)", 'ParameterValue', "$($comp.Description)")
+ }
+ }
+
+ }
+}
+
+Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock ${__%[2]sCompleterBlock}
+`, name, nameForVar, compCmd,
+ ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp,
+ ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, ShellCompDirectiveKeepOrder, activeHelpEnvVar(name)))
+}
+
+func (c *Command) genPowerShellCompletion(w io.Writer, includeDesc bool) error {
+ buf := new(bytes.Buffer)
+ genPowerShellComp(buf, c.Name(), includeDesc)
+ _, err := buf.WriteTo(w)
+ return err
+}
+
+func (c *Command) genPowerShellCompletionFile(filename string, includeDesc bool) error {
+ outFile, err := os.Create(filename)
+ if err != nil {
+ return err
+ }
+ defer outFile.Close()
+
+ return c.genPowerShellCompletion(outFile, includeDesc)
+}
+
+// GenPowerShellCompletionFile generates a powershell completion file without descriptions.
+func (c *Command) GenPowerShellCompletionFile(filename string) error {
+ return c.genPowerShellCompletionFile(filename, false)
+}
+
+// GenPowerShellCompletion generates a powershell completion file without descriptions
+// and writes it to the passed writer.
+func (c *Command) GenPowerShellCompletion(w io.Writer) error {
+ return c.genPowerShellCompletion(w, false)
+}
+
+// GenPowerShellCompletionFileWithDesc generates a powershell completion file with descriptions.
+func (c *Command) GenPowerShellCompletionFileWithDesc(filename string) error {
+ return c.genPowerShellCompletionFile(filename, true)
+}
+
+// GenPowerShellCompletionWithDesc generates a powershell completion file with descriptions
+// and writes it to the passed writer.
+func (c *Command) GenPowerShellCompletionWithDesc(w io.Writer) error {
+ return c.genPowerShellCompletion(w, true)
+}
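
As with the other shells, generation is a one-liner from the embedding program's side. A sketch (`demo` is a placeholder; the `completion powershell` invocation assumes the default completion command registered earlier in this file):

```go
package main

import (
	"os"

	"github.com/spf13/cobra"
)

func main() {
	root := &cobra.Command{Use: "demo"}
	// Emit the script with descriptions; a user would typically load it with:
	//   demo completion powershell | Out-String | Invoke-Expression
	if err := root.GenPowerShellCompletionWithDesc(os.Stdout); err != nil {
		panic(err)
	}
}
```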
diff --git a/vendor/github.com/spf13/cobra/shell_completions.go b/vendor/github.com/spf13/cobra/shell_completions.go
new file mode 100644
index 000000000..b035742d3
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/shell_completions.go
@@ -0,0 +1,98 @@
+// Copyright 2013-2023 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cobra
+
+import (
+ "github.com/spf13/pflag"
+)
+
+// MarkFlagRequired instructs the various shell completion implementations to
+// prioritize the named flag when performing completion,
+// and causes your command to report an error if invoked without the flag.
+func (c *Command) MarkFlagRequired(name string) error {
+ return MarkFlagRequired(c.Flags(), name)
+}
+
+// MarkPersistentFlagRequired instructs the various shell completion implementations to
+// prioritize the named persistent flag when performing completion,
+// and causes your command to report an error if invoked without the flag.
+func (c *Command) MarkPersistentFlagRequired(name string) error {
+ return MarkFlagRequired(c.PersistentFlags(), name)
+}
+
+// MarkFlagRequired instructs the various shell completion implementations to
+// prioritize the named flag when performing completion,
+// and causes your command to report an error if invoked without the flag.
+func MarkFlagRequired(flags *pflag.FlagSet, name string) error {
+ return flags.SetAnnotation(name, BashCompOneRequiredFlag, []string{"true"})
+}
+
+// MarkFlagFilename instructs the various shell completion implementations to
+// limit completions for the named flag to the specified file extensions.
+func (c *Command) MarkFlagFilename(name string, extensions ...string) error {
+ return MarkFlagFilename(c.Flags(), name, extensions...)
+}
+
+// MarkFlagCustom adds the BashCompCustom annotation to the named flag, if it exists.
+// The bash completion script will call the bash function f for the flag.
+//
+// This will only work for bash completion.
+// It is recommended to instead use c.RegisterFlagCompletionFunc(...), which allows
+// you to register a Go function that will work across all shells.
+func (c *Command) MarkFlagCustom(name string, f string) error {
+ return MarkFlagCustom(c.Flags(), name, f)
+}
+
+// MarkPersistentFlagFilename instructs the various shell completion
+// implementations to limit completions for the named persistent flag to the
+// specified file extensions.
+func (c *Command) MarkPersistentFlagFilename(name string, extensions ...string) error {
+ return MarkFlagFilename(c.PersistentFlags(), name, extensions...)
+}
+
+// MarkFlagFilename instructs the various shell completion implementations to
+// limit completions for the named flag to the specified file extensions.
+func MarkFlagFilename(flags *pflag.FlagSet, name string, extensions ...string) error {
+ return flags.SetAnnotation(name, BashCompFilenameExt, extensions)
+}
+
+// MarkFlagCustom adds the BashCompCustom annotation to the named flag, if it exists.
+// The bash completion script will call the bash function f for the flag.
+//
+// This will only work for bash completion.
+// It is recommended to instead use c.RegisterFlagCompletionFunc(...), which allows
+// you to register a Go function that will work across all shells.
+func MarkFlagCustom(flags *pflag.FlagSet, name string, f string) error {
+ return flags.SetAnnotation(name, BashCompCustom, []string{f})
+}
+
+// MarkFlagDirname instructs the various shell completion implementations to
+// limit completions for the named flag to directory names.
+func (c *Command) MarkFlagDirname(name string) error {
+ return MarkFlagDirname(c.Flags(), name)
+}
+
+// MarkPersistentFlagDirname instructs the various shell completion
+// implementations to limit completions for the named persistent flag to
+// directory names.
+func (c *Command) MarkPersistentFlagDirname(name string) error {
+ return MarkFlagDirname(c.PersistentFlags(), name)
+}
+
+// MarkFlagDirname instructs the various shell completion implementations to
+// limit completions for the named flag to directory names.
+func MarkFlagDirname(flags *pflag.FlagSet, name string) error {
+ return flags.SetAnnotation(name, BashCompSubdirsInDir, []string{})
+}
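
A sketch of the file- and directory-completion markers in use (the `apply` command and flag names are illustrative):

```go
package main

import "github.com/spf13/cobra"

func main() {
	cmd := &cobra.Command{Use: "apply", Run: func(*cobra.Command, []string) {}}
	cmd.Flags().StringP("filename", "f", "", "manifest to apply")
	cmd.Flags().String("output-dir", "", "directory for results")

	// Complete --filename only with *.yaml / *.yml files,
	// and --output-dir only with directory names.
	_ = cmd.MarkFlagFilename("filename", "yaml", "yml")
	_ = cmd.MarkFlagDirname("output-dir")

	_ = cmd.Execute()
}
```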
diff --git a/vendor/github.com/spf13/cobra/zsh_completions.go b/vendor/github.com/spf13/cobra/zsh_completions.go
new file mode 100644
index 000000000..1856e4c7f
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/zsh_completions.go
@@ -0,0 +1,308 @@
+// Copyright 2013-2023 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cobra
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+)
+
+// GenZshCompletionFile generates a zsh completion file including descriptions.
+func (c *Command) GenZshCompletionFile(filename string) error {
+ return c.genZshCompletionFile(filename, true)
+}
+
+// GenZshCompletion generates a zsh completion file including descriptions
+// and writes it to the passed writer.
+func (c *Command) GenZshCompletion(w io.Writer) error {
+ return c.genZshCompletion(w, true)
+}
+
+// GenZshCompletionFileNoDesc generates a zsh completion file without descriptions.
+func (c *Command) GenZshCompletionFileNoDesc(filename string) error {
+ return c.genZshCompletionFile(filename, false)
+}
+
+// GenZshCompletionNoDesc generates a zsh completion file without descriptions
+// and writes it to the passed writer.
+func (c *Command) GenZshCompletionNoDesc(w io.Writer) error {
+ return c.genZshCompletion(w, false)
+}
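
A minimal sketch of emitting the zsh script from the embedding program (`demo` is a placeholder name; the install path is the conventional one, not mandated by this code):

```go
package main

import (
	"os"

	"github.com/spf13/cobra"
)

func main() {
	root := &cobra.Command{Use: "demo"}
	// Write the zsh script to stdout; a user would typically install it
	// as a _demo file somewhere on $fpath, e.g.:
	//   demo completion zsh > "${fpath[1]}/_demo"
	if err := root.GenZshCompletion(os.Stdout); err != nil {
		panic(err)
	}
}
```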
+
+// MarkZshCompPositionalArgumentFile only worked for zsh and its behavior was
+// not consistent with Bash completion. It has therefore been disabled.
+// Instead, when no other completion is specified, file completion is done by
+// default for every argument. One can disable file completion on a per-argument
+// basis by using ValidArgsFunction and ShellCompDirectiveNoFileComp.
+// To achieve file extension filtering, one can use ValidArgsFunction and
+// ShellCompDirectiveFilterFileExt.
+//
+// Deprecated
+func (c *Command) MarkZshCompPositionalArgumentFile(argPosition int, patterns ...string) error {
+ return nil
+}
+
+// MarkZshCompPositionalArgumentWords only worked for zsh. It has therefore
+// been disabled.
+// To achieve the same behavior across all shells, one can use
+// ValidArgs (for the first argument only) or ValidArgsFunction for
+// any argument (including the first one).
+//
+// Deprecated
+func (c *Command) MarkZshCompPositionalArgumentWords(argPosition int, words ...string) error {
+ return nil
+}
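
The replacement that both deprecation notes point to looks like this (a sketch; the `get` command and its candidate words are invented):

```go
package main

import "github.com/spf13/cobra"

func main() {
	cmd := &cobra.Command{
		Use: "get",
		ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
			if len(args) != 0 {
				// Only the first positional argument is completed.
				return nil, cobra.ShellCompDirectiveNoFileComp
			}
			return []string{"pods", "services"}, cobra.ShellCompDirectiveNoFileComp
		},
	}
	_ = cmd.Execute()
}
```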
+
+func (c *Command) genZshCompletionFile(filename string, includeDesc bool) error {
+ outFile, err := os.Create(filename)
+ if err != nil {
+ return err
+ }
+ defer outFile.Close()
+
+ return c.genZshCompletion(outFile, includeDesc)
+}
+
+func (c *Command) genZshCompletion(w io.Writer, includeDesc bool) error {
+ buf := new(bytes.Buffer)
+ genZshComp(buf, c.Name(), includeDesc)
+ _, err := buf.WriteTo(w)
+ return err
+}
+
+func genZshComp(buf io.StringWriter, name string, includeDesc bool) {
+ compCmd := ShellCompRequestCmd
+ if !includeDesc {
+ compCmd = ShellCompNoDescRequestCmd
+ }
+ WriteStringAndCheck(buf, fmt.Sprintf(`#compdef %[1]s
+compdef _%[1]s %[1]s
+
+# zsh completion for %-36[1]s -*- shell-script -*-
+
+__%[1]s_debug()
+{
+ local file="$BASH_COMP_DEBUG_FILE"
+ if [[ -n ${file} ]]; then
+ echo "$*" >> "${file}"
+ fi
+}
+
+_%[1]s()
+{
+ local shellCompDirectiveError=%[3]d
+ local shellCompDirectiveNoSpace=%[4]d
+ local shellCompDirectiveNoFileComp=%[5]d
+ local shellCompDirectiveFilterFileExt=%[6]d
+ local shellCompDirectiveFilterDirs=%[7]d
+ local shellCompDirectiveKeepOrder=%[8]d
+
+ local lastParam lastChar flagPrefix requestComp out directive comp lastComp noSpace keepOrder
+ local -a completions
+
+ __%[1]s_debug "\n========= starting completion logic =========="
+ __%[1]s_debug "CURRENT: ${CURRENT}, words[*]: ${words[*]}"
+
+ # The user could have moved the cursor backwards on the command-line.
+ # We need to trigger completion from the $CURRENT location, so we need
+ # to truncate the command-line ($words) up to the $CURRENT location.
+ # (We cannot use $CURSOR as its value does not work when a command is an alias.)
+ words=("${=words[1,CURRENT]}")
+ __%[1]s_debug "Truncated words[*]: ${words[*]},"
+
+ lastParam=${words[-1]}
+ lastChar=${lastParam[-1]}
+ __%[1]s_debug "lastParam: ${lastParam}, lastChar: ${lastChar}"
+
+ # For zsh, when completing a flag with an = (e.g., %[1]s -n=)
+ # completions must be prefixed with the flag
+ setopt local_options BASH_REMATCH
+ if [[ "${lastParam}" =~ '-.*=' ]]; then
+ # We are dealing with a flag with an =
+ flagPrefix="-P ${BASH_REMATCH}"
+ fi
+
+ # Prepare the command to obtain completions
+ requestComp="${words[1]} %[2]s ${words[2,-1]}"
+ if [ "${lastChar}" = "" ]; then
+ # If the last parameter is complete (there is a space following it)
+ # We add an extra empty parameter so we can indicate this to the go completion code.
+ __%[1]s_debug "Adding extra empty parameter"
+ requestComp="${requestComp} \"\""
+ fi
+
+ __%[1]s_debug "About to call: eval ${requestComp}"
+
+ # Use eval to handle any environment variables and such
+ out=$(eval ${requestComp} 2>/dev/null)
+ __%[1]s_debug "completion output: ${out}"
+
+ # Extract the directive integer following a : from the last line
+ local lastLine
+ while IFS='\n' read -r line; do
+ lastLine=${line}
+ done < <(printf "%%s\n" "${out[@]}")
+ __%[1]s_debug "last line: ${lastLine}"
+
+ if [ "${lastLine[1]}" = : ]; then
+ directive=${lastLine[2,-1]}
+ # Remove the directive including the : and the newline
+ local suffix
+ (( suffix=${#lastLine}+2))
+ out=${out[1,-$suffix]}
+ else
+ # There is no directive specified. Leave $out as is.
+ __%[1]s_debug "No directive found. Setting do default"
+ directive=0
+ fi
+
+ __%[1]s_debug "directive: ${directive}"
+ __%[1]s_debug "completions: ${out}"
+ __%[1]s_debug "flagPrefix: ${flagPrefix}"
+
+ if [ $((directive & shellCompDirectiveError)) -ne 0 ]; then
+ __%[1]s_debug "Completion received error. Ignoring completions."
+ return
+ fi
+
+ local activeHelpMarker="%[9]s"
+ local endIndex=${#activeHelpMarker}
+ local startIndex=$((${#activeHelpMarker}+1))
+ local hasActiveHelp=0
+ while IFS='\n' read -r comp; do
+ # Check if this is an activeHelp statement (i.e., prefixed with $activeHelpMarker)
+ if [ "${comp[1,$endIndex]}" = "$activeHelpMarker" ];then
+ __%[1]s_debug "ActiveHelp found: $comp"
+ comp="${comp[$startIndex,-1]}"
+ if [ -n "$comp" ]; then
+ compadd -x "${comp}"
+ __%[1]s_debug "ActiveHelp will need delimiter"
+ hasActiveHelp=1
+ fi
+
+ continue
+ fi
+
+ if [ -n "$comp" ]; then
+ # If requested, completions are returned with a description.
+ # The description is preceded by a TAB character.
+ # For zsh's _describe, we need to use a : instead of a TAB.
+ # We first need to escape any : as part of the completion itself.
+ comp=${comp//:/\\:}
+
+ local tab="$(printf '\t')"
+ comp=${comp//$tab/:}
+
+ __%[1]s_debug "Adding completion: ${comp}"
+ completions+=${comp}
+ lastComp=$comp
+ fi
+ done < <(printf "%%s\n" "${out[@]}")
+
+ # Add a delimiter after the activeHelp statements, but only if:
+ # - there are completions following the activeHelp statements, or
+ # - file completion will be performed (so there will be choices after the activeHelp)
+ if [ $hasActiveHelp -eq 1 ]; then
+ if [ ${#completions} -ne 0 ] || [ $((directive & shellCompDirectiveNoFileComp)) -eq 0 ]; then
+ __%[1]s_debug "Adding activeHelp delimiter"
+ compadd -x "--"
+ hasActiveHelp=0
+ fi
+ fi
+
+ if [ $((directive & shellCompDirectiveNoSpace)) -ne 0 ]; then
+ __%[1]s_debug "Activating nospace."
+ noSpace="-S ''"
+ fi
+
+ if [ $((directive & shellCompDirectiveKeepOrder)) -ne 0 ]; then
+ __%[1]s_debug "Activating keep order."
+ keepOrder="-V"
+ fi
+
+ if [ $((directive & shellCompDirectiveFilterFileExt)) -ne 0 ]; then
+ # File extension filtering
+ local filteringCmd
+ filteringCmd='_files'
+ for filter in ${completions[@]}; do
+ if [ ${filter[1]} != '*' ]; then
+ # zsh requires a glob pattern to do file filtering
+ filter="\*.$filter"
+ fi
+ filteringCmd+=" -g $filter"
+ done
+ filteringCmd+=" ${flagPrefix}"
+
+ __%[1]s_debug "File filtering command: $filteringCmd"
+ _arguments '*:filename:'"$filteringCmd"
+ elif [ $((directive & shellCompDirectiveFilterDirs)) -ne 0 ]; then
+ # File completion for directories only
+ local subdir
+ subdir="${completions[1]}"
+ if [ -n "$subdir" ]; then
+ __%[1]s_debug "Listing directories in $subdir"
+ pushd "${subdir}" >/dev/null 2>&1
+ else
+ __%[1]s_debug "Listing directories in ."
+ fi
+
+ local result
+ _arguments '*:dirname:_files -/'" ${flagPrefix}"
+ result=$?
+ if [ -n "$subdir" ]; then
+ popd >/dev/null 2>&1
+ fi
+ return $result
+ else
+ __%[1]s_debug "Calling _describe"
+ if eval _describe $keepOrder "completions" completions $flagPrefix $noSpace; then
+ __%[1]s_debug "_describe found some completions"
+
+ # Return the success of having called _describe
+ return 0
+ else
+ __%[1]s_debug "_describe did not find completions."
+ __%[1]s_debug "Checking if we should do file completion."
+ if [ $((directive & shellCompDirectiveNoFileComp)) -ne 0 ]; then
+ __%[1]s_debug "deactivating file completion"
+
+ # We must return an error code here to let zsh know that there were no
+ # completions found by _describe; this is what will trigger other
+ # matching algorithms to attempt to find completions.
+ # For example zsh can match letters in the middle of words.
+ return 1
+ else
+ # Perform file completion
+ __%[1]s_debug "Activating file completion"
+
+ # We must return the result of this command, so it must be the
+ # last command, or else we must store its result to return it.
+ _arguments '*:filename:_files'" ${flagPrefix}"
+ fi
+ fi
+ fi
+}
+
+# don't run the completion function when being source-ed or eval-ed
+if [ "$funcstack[1]" = "_%[1]s" ]; then
+ _%[1]s
+fi
+`, name, compCmd,
+ ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp,
+ ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, ShellCompDirectiveKeepOrder,
+ activeHelpMarker))
+}
diff --git a/vendor/github.com/stoewer/go-strcase/.gitignore b/vendor/github.com/stoewer/go-strcase/.gitignore
new file mode 100644
index 000000000..db5247b94
--- /dev/null
+++ b/vendor/github.com/stoewer/go-strcase/.gitignore
@@ -0,0 +1,17 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+vendor
+doc
+
+# Temporary files
+*~
+*.swp
+
+# Editor and IDE config
+.idea
+*.iml
+.vscode
diff --git a/vendor/github.com/stoewer/go-strcase/.golangci.yml b/vendor/github.com/stoewer/go-strcase/.golangci.yml
new file mode 100644
index 000000000..7f98d55c4
--- /dev/null
+++ b/vendor/github.com/stoewer/go-strcase/.golangci.yml
@@ -0,0 +1,26 @@
+run:
+ deadline: 10m
+
+linters:
+ enable:
+ - dupl
+ - goconst
+ - gocyclo
+ - godox
+ - gosec
+ - interfacer
+ - lll
+ - maligned
+ - misspell
+ - prealloc
+ - stylecheck
+ - unconvert
+ - unparam
+ - errcheck
+ - golint
+ - gofmt
+ disable: []
+ fast: false
+
+issues:
+ exclude-use-default: false
diff --git a/vendor/github.com/stoewer/go-strcase/LICENSE b/vendor/github.com/stoewer/go-strcase/LICENSE
new file mode 100644
index 000000000..a105a3819
--- /dev/null
+++ b/vendor/github.com/stoewer/go-strcase/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2017, Adrian Stoewer
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/stoewer/go-strcase/README.md b/vendor/github.com/stoewer/go-strcase/README.md
new file mode 100644
index 000000000..84a640e71
--- /dev/null
+++ b/vendor/github.com/stoewer/go-strcase/README.md
@@ -0,0 +1,50 @@
+[Build Status](https://github.com/stoewer/go-strcase/actions)
+[Coverage](https://codecov.io/github/stoewer/go-strcase)
+[Go Reference](https://pkg.go.dev/github.com/stoewer/go-strcase)
+---
+
+Go strcase
+==========
+
+The package `strcase` converts between different kinds of naming formats such as camel case
+(`CamelCase`), snake case (`snake_case`) or kebab case (`kebab-case`).
+The package is designed to work only with strings consisting of standard ASCII letters.
+Unicode is currently not supported.
+
+Versioning and stability
+------------------------
+
+Although the master branch is supposed to always remain backward compatible, the repository
+contains version tags in order to support vendoring tools.
+The tag names follow semantic versioning conventions, e.g. `v1.0.0`.
+This package supports Go modules, which were introduced with Go 1.11.
+
+Example
+-------
+
+```go
+import "github.com/stoewer/go-strcase"
+
+var snake = strcase.SnakeCase("CamelCase")
+```
+
+Dependencies
+------------
+
+### Build dependencies
+
+* none
+
+### Test dependencies
+
+* `github.com/stretchr/testify`
+
+Run linters and unit tests
+--------------------------
+
+To run the static code analysis, linters, and tests, use the following commands:
+
+```
+golangci-lint run --config .golangci.yml ./...
+go test ./...
+```
diff --git a/vendor/github.com/stoewer/go-strcase/camel.go b/vendor/github.com/stoewer/go-strcase/camel.go
new file mode 100644
index 000000000..ff9e66e0c
--- /dev/null
+++ b/vendor/github.com/stoewer/go-strcase/camel.go
@@ -0,0 +1,40 @@
+// Copyright (c) 2017, A. Stoewer
+// All rights reserved.
+
+package strcase
+
+import (
+ "strings"
+)
+
+// UpperCamelCase converts a string into camel case starting with an upper case letter.
+func UpperCamelCase(s string) string {
+ return camelCase(s, true)
+}
+
+// LowerCamelCase converts a string into camel case starting with a lower case letter.
+func LowerCamelCase(s string) string {
+ return camelCase(s, false)
+}
+
+func camelCase(s string, upper bool) string {
+ s = strings.TrimSpace(s)
+ buffer := make([]rune, 0, len(s))
+
+ stringIter(s, func(prev, curr, next rune) {
+ if !isDelimiter(curr) {
+ if isDelimiter(prev) || (upper && prev == 0) {
+ buffer = append(buffer, toUpper(curr))
+ } else if isLower(prev) {
+ buffer = append(buffer, curr)
+ } else if isUpper(prev) && isUpper(curr) && isLower(next) {
+ // Assume a case like "R" for "XRequestId"
+ buffer = append(buffer, curr)
+ } else {
+ buffer = append(buffer, toLower(curr))
+ }
+ }
+ })
+
+ return string(buffer)
+}
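
For instance, the two exported entry points behave as follows (a small sketch; expected outputs shown in comments, derived from the algorithm above):

```go
package main

import (
	"fmt"

	"github.com/stoewer/go-strcase"
)

func main() {
	fmt.Println(strcase.UpperCamelCase("x_request_id"))  // XRequestId
	fmt.Println(strcase.LowerCamelCase("HTTP-response")) // httpResponse
}
```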
diff --git a/vendor/github.com/stoewer/go-strcase/doc.go b/vendor/github.com/stoewer/go-strcase/doc.go
new file mode 100644
index 000000000..3e441ca3e
--- /dev/null
+++ b/vendor/github.com/stoewer/go-strcase/doc.go
@@ -0,0 +1,8 @@
+// Copyright (c) 2017, A. Stoewer
+// All rights reserved.
+
+// Package strcase converts between different kinds of naming formats such as camel case
+// (CamelCase), snake case (snake_case) or kebab case (kebab-case). The package is designed
+// to work only with strings consisting of standard ASCII letters. Unicode is currently not
+// supported.
+package strcase
diff --git a/vendor/github.com/stoewer/go-strcase/helper.go b/vendor/github.com/stoewer/go-strcase/helper.go
new file mode 100644
index 000000000..ecad58914
--- /dev/null
+++ b/vendor/github.com/stoewer/go-strcase/helper.go
@@ -0,0 +1,71 @@
+// Copyright (c) 2017, A. Stoewer
+// All rights reserved.
+
+package strcase
+
+// isLower checks if a character is lower case. More precisely it evaluates if it is
+// in the range of ASCII characters 'a' to 'z'.
+func isLower(ch rune) bool {
+ return ch >= 'a' && ch <= 'z'
+}
+
+// toLower converts a character in the range of ASCII characters 'A' to 'Z' to its lower
+// case counterpart. Other characters remain the same.
+func toLower(ch rune) rune {
+ if ch >= 'A' && ch <= 'Z' {
+ return ch + 32
+ }
+ return ch
+}
+
+// isUpper checks if a character is upper case. More precisely it evaluates if it is
+// in the range of ASCII characters 'A' to 'Z'.
+func isUpper(ch rune) bool {
+ return ch >= 'A' && ch <= 'Z'
+}
+
+// toUpper converts a character in the range of ASCII characters 'a' to 'z' to its upper
+// case counterpart. Other characters remain the same.
+func toUpper(ch rune) rune {
+ if ch >= 'a' && ch <= 'z' {
+ return ch - 32
+ }
+ return ch
+}
+
+// isSpace checks if a character is some kind of whitespace.
+func isSpace(ch rune) bool {
+ return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r'
+}
+
+// isDelimiter checks if a character is some kind of whitespace or '_' or '-'.
+func isDelimiter(ch rune) bool {
+ return ch == '-' || ch == '_' || isSpace(ch)
+}
+
+// iterFunc is a callback that is called for a specific position in a string. Its arguments are the
+// rune at the respective string position as well as the previous and the next rune. If curr is at the
+// first position of the string, prev is zero. If curr is at the end of the string, next is zero.
+type iterFunc func(prev, curr, next rune)
+
+// stringIter iterates over a string, invoking the callback for every single rune in the string.
+func stringIter(s string, callback iterFunc) {
+ var prev rune
+ var curr rune
+ for _, next := range s {
+ if curr == 0 {
+ prev = curr
+ curr = next
+ continue
+ }
+
+ callback(prev, curr, next)
+
+ prev = curr
+ curr = next
+ }
+
+ if len(s) > 0 {
+ callback(prev, curr, 0)
+ }
+}
diff --git a/vendor/github.com/stoewer/go-strcase/kebab.go b/vendor/github.com/stoewer/go-strcase/kebab.go
new file mode 100644
index 000000000..e9a648757
--- /dev/null
+++ b/vendor/github.com/stoewer/go-strcase/kebab.go
@@ -0,0 +1,14 @@
+// Copyright (c) 2017, A. Stoewer
+// All rights reserved.
+
+package strcase
+
+// KebabCase converts a string into kebab case.
+func KebabCase(s string) string {
+ return delimiterCase(s, '-', false)
+}
+
+// UpperKebabCase converts a string into kebab case with capital letters.
+func UpperKebabCase(s string) string {
+ return delimiterCase(s, '-', true)
+}
diff --git a/vendor/github.com/stoewer/go-strcase/snake.go b/vendor/github.com/stoewer/go-strcase/snake.go
new file mode 100644
index 000000000..1b216e20c
--- /dev/null
+++ b/vendor/github.com/stoewer/go-strcase/snake.go
@@ -0,0 +1,58 @@
+// Copyright (c) 2017, A. Stoewer
+// All rights reserved.
+
+package strcase
+
+import (
+ "strings"
+)
+
+// SnakeCase converts a string into snake case.
+func SnakeCase(s string) string {
+ return delimiterCase(s, '_', false)
+}
+
+// UpperSnakeCase converts a string into snake case with capital letters.
+func UpperSnakeCase(s string) string {
+ return delimiterCase(s, '_', true)
+}
+
+// delimiterCase converts a string into snake_case or kebab-case depending on the delimiter passed
+// as second argument. When upperCase is true the result will be UPPER_SNAKE_CASE or UPPER-KEBAB-CASE.
+func delimiterCase(s string, delimiter rune, upperCase bool) string {
+ s = strings.TrimSpace(s)
+ buffer := make([]rune, 0, len(s)+3)
+
+ adjustCase := toLower
+ if upperCase {
+ adjustCase = toUpper
+ }
+
+ var prev rune
+ var curr rune
+ for _, next := range s {
+ if isDelimiter(curr) {
+ if !isDelimiter(prev) {
+ buffer = append(buffer, delimiter)
+ }
+ } else if isUpper(curr) {
+ if isLower(prev) || (isUpper(prev) && isLower(next)) {
+ buffer = append(buffer, delimiter)
+ }
+ buffer = append(buffer, adjustCase(curr))
+ } else if curr != 0 {
+ buffer = append(buffer, adjustCase(curr))
+ }
+ prev = curr
+ curr = next
+ }
+
+ if len(s) > 0 {
+ if isUpper(curr) && isLower(prev) && prev != 0 {
+ buffer = append(buffer, delimiter)
+ }
+ buffer = append(buffer, adjustCase(curr))
+ }
+
+ return string(buffer)
+}
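
Taken together, the exported helpers behave as follows. A short usage sketch;
the expected outputs are derived by tracing the delimiterCase logic above and
the sample inputs are arbitrary.

    package main

    import (
        "fmt"

        "github.com/stoewer/go-strcase"
    )

    func main() {
        // Both delimiters (spaces, '-', '_') and case boundaries split words.
        fmt.Println(strcase.SnakeCase("HTTPStatusCode"))  // http_status_code
        fmt.Println(strcase.KebabCase("ingress node FW")) // ingress-node-fw
        fmt.Println(strcase.UpperSnakeCase("listenAddr")) // LISTEN_ADDR
    }
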
diff --git a/vendor/golang.org/x/exp/LICENSE b/vendor/golang.org/x/exp/LICENSE
new file mode 100644
index 000000000..2a7cf70da
--- /dev/null
+++ b/vendor/golang.org/x/exp/LICENSE
@@ -0,0 +1,27 @@
+Copyright 2009 The Go Authors.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google LLC nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/golang.org/x/exp/PATENTS b/vendor/golang.org/x/exp/PATENTS
new file mode 100644
index 000000000..733099041
--- /dev/null
+++ b/vendor/golang.org/x/exp/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go. This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/vendor/golang.org/x/exp/constraints/constraints.go b/vendor/golang.org/x/exp/constraints/constraints.go
new file mode 100644
index 000000000..2c033dff4
--- /dev/null
+++ b/vendor/golang.org/x/exp/constraints/constraints.go
@@ -0,0 +1,50 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package constraints defines a set of useful constraints to be used
+// with type parameters.
+package constraints
+
+// Signed is a constraint that permits any signed integer type.
+// If future releases of Go add new predeclared signed integer types,
+// this constraint will be modified to include them.
+type Signed interface {
+ ~int | ~int8 | ~int16 | ~int32 | ~int64
+}
+
+// Unsigned is a constraint that permits any unsigned integer type.
+// If future releases of Go add new predeclared unsigned integer types,
+// this constraint will be modified to include them.
+type Unsigned interface {
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr
+}
+
+// Integer is a constraint that permits any integer type.
+// If future releases of Go add new predeclared integer types,
+// this constraint will be modified to include them.
+type Integer interface {
+ Signed | Unsigned
+}
+
+// Float is a constraint that permits any floating-point type.
+// If future releases of Go add new predeclared floating-point types,
+// this constraint will be modified to include them.
+type Float interface {
+ ~float32 | ~float64
+}
+
+// Complex is a constraint that permits any complex numeric type.
+// If future releases of Go add new predeclared complex numeric types,
+// this constraint will be modified to include them.
+type Complex interface {
+ ~complex64 | ~complex128
+}
+
+// Ordered is a constraint that permits any ordered type: any type
+// that supports the operators < <= >= >.
+// If future releases of Go add new ordered types,
+// this constraint will be modified to include them.
+type Ordered interface {
+ Integer | Float | ~string
+}
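
Any of these constraints can be used directly as a type parameter bound. A
minimal sketch; the clamp helper is illustrative and not part of the package.

    package main

    import (
        "fmt"

        "golang.org/x/exp/constraints"
    )

    // clamp restricts v to [lo, hi]. It compiles for any Ordered type:
    // integers, floats, and strings all support < and >.
    func clamp[T constraints.Ordered](v, lo, hi T) T {
        if v < lo {
            return lo
        }
        if v > hi {
            return hi
        }
        return v
    }

    func main() {
        fmt.Println(clamp(42, 0, 10))     // 10
        fmt.Println(clamp("m", "a", "f")) // f
    }
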
diff --git a/vendor/golang.org/x/exp/slices/cmp.go b/vendor/golang.org/x/exp/slices/cmp.go
new file mode 100644
index 000000000..fbf1934a0
--- /dev/null
+++ b/vendor/golang.org/x/exp/slices/cmp.go
@@ -0,0 +1,44 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slices
+
+import "golang.org/x/exp/constraints"
+
+// min is a version of the predeclared function from the Go 1.21 release.
+func min[T constraints.Ordered](a, b T) T {
+ if a < b || isNaN(a) {
+ return a
+ }
+ return b
+}
+
+// max is a version of the predeclared function from the Go 1.21 release.
+func max[T constraints.Ordered](a, b T) T {
+ if a > b || isNaN(a) {
+ return a
+ }
+ return b
+}
+
+// cmpLess is a copy of cmp.Less from the Go 1.21 release.
+func cmpLess[T constraints.Ordered](x, y T) bool {
+ return (isNaN(x) && !isNaN(y)) || x < y
+}
+
+// cmpCompare is a copy of cmp.Compare from the Go 1.21 release.
+func cmpCompare[T constraints.Ordered](x, y T) int {
+ xNaN := isNaN(x)
+ yNaN := isNaN(y)
+ if xNaN && yNaN {
+ return 0
+ }
+ if xNaN || x < y {
+ return -1
+ }
+ if yNaN || x > y {
+ return +1
+ }
+ return 0
+}
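
The point of these helpers is their NaN handling: a NaN sorts before every
non-NaN value, and two NaNs compare equal. A standalone float64 restatement of
cmpLess for illustration (the name less is ours, not the package's):

    package main

    import (
        "fmt"
        "math"
    )

    // less mirrors cmpLess above for float64: NaN orders before everything
    // else, and NaN is not less than NaN.
    func less(x, y float64) bool {
        xNaN, yNaN := math.IsNaN(x), math.IsNaN(y)
        return (xNaN && !yNaN) || x < y
    }

    func main() {
        nan := math.NaN()
        fmt.Println(less(nan, 1))   // true
        fmt.Println(less(1, nan))   // false
        fmt.Println(less(nan, nan)) // false
    }
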
diff --git a/vendor/golang.org/x/exp/slices/slices.go b/vendor/golang.org/x/exp/slices/slices.go
new file mode 100644
index 000000000..46ceac343
--- /dev/null
+++ b/vendor/golang.org/x/exp/slices/slices.go
@@ -0,0 +1,515 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package slices defines various functions useful with slices of any type.
+package slices
+
+import (
+ "unsafe"
+
+ "golang.org/x/exp/constraints"
+)
+
+// Equal reports whether two slices are equal: the same length and all
+// elements equal. If the lengths are different, Equal returns false.
+// Otherwise, the elements are compared in increasing index order, and the
+// comparison stops at the first unequal pair.
+// Floating point NaNs are not considered equal.
+func Equal[S ~[]E, E comparable](s1, s2 S) bool {
+ if len(s1) != len(s2) {
+ return false
+ }
+ for i := range s1 {
+ if s1[i] != s2[i] {
+ return false
+ }
+ }
+ return true
+}
+
+// EqualFunc reports whether two slices are equal using an equality
+// function on each pair of elements. If the lengths are different,
+// EqualFunc returns false. Otherwise, the elements are compared in
+// increasing index order, and the comparison stops at the first index
+// for which eq returns false.
+func EqualFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, eq func(E1, E2) bool) bool {
+ if len(s1) != len(s2) {
+ return false
+ }
+ for i, v1 := range s1 {
+ v2 := s2[i]
+ if !eq(v1, v2) {
+ return false
+ }
+ }
+ return true
+}
+
+// Compare compares the elements of s1 and s2, using [cmp.Compare] on each pair
+// of elements. The elements are compared sequentially, starting at index 0,
+// until one element is not equal to the other.
+// The result of comparing the first non-matching elements is returned.
+// If both slices are equal until one of them ends, the shorter slice is
+// considered less than the longer one.
+// The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2.
+func Compare[S ~[]E, E constraints.Ordered](s1, s2 S) int {
+ for i, v1 := range s1 {
+ if i >= len(s2) {
+ return +1
+ }
+ v2 := s2[i]
+ if c := cmpCompare(v1, v2); c != 0 {
+ return c
+ }
+ }
+ if len(s1) < len(s2) {
+ return -1
+ }
+ return 0
+}
+
+// CompareFunc is like [Compare] but uses a custom comparison function on each
+// pair of elements.
+// The result is the first non-zero result of cmp; if cmp always
+// returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2),
+// and +1 if len(s1) > len(s2).
+func CompareFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, cmp func(E1, E2) int) int {
+ for i, v1 := range s1 {
+ if i >= len(s2) {
+ return +1
+ }
+ v2 := s2[i]
+ if c := cmp(v1, v2); c != 0 {
+ return c
+ }
+ }
+ if len(s1) < len(s2) {
+ return -1
+ }
+ return 0
+}
+
+// Index returns the index of the first occurrence of v in s,
+// or -1 if not present.
+func Index[S ~[]E, E comparable](s S, v E) int {
+ for i := range s {
+ if v == s[i] {
+ return i
+ }
+ }
+ return -1
+}
+
+// IndexFunc returns the first index i satisfying f(s[i]),
+// or -1 if none do.
+func IndexFunc[S ~[]E, E any](s S, f func(E) bool) int {
+ for i := range s {
+ if f(s[i]) {
+ return i
+ }
+ }
+ return -1
+}
+
+// Contains reports whether v is present in s.
+func Contains[S ~[]E, E comparable](s S, v E) bool {
+ return Index(s, v) >= 0
+}
+
+// ContainsFunc reports whether at least one
+// element e of s satisfies f(e).
+func ContainsFunc[S ~[]E, E any](s S, f func(E) bool) bool {
+ return IndexFunc(s, f) >= 0
+}
+
+// Insert inserts the values v... into s at index i,
+// returning the modified slice.
+// The elements at s[i:] are shifted up to make room.
+// In the returned slice r, r[i] == v[0],
+// and r[i+len(v)] == value originally at r[i].
+// Insert panics if i is out of range.
+// This function is O(len(s) + len(v)).
+func Insert[S ~[]E, E any](s S, i int, v ...E) S {
+ m := len(v)
+ if m == 0 {
+ return s
+ }
+ n := len(s)
+ if i == n {
+ return append(s, v...)
+ }
+ if n+m > cap(s) {
+ // Use append rather than make so that we bump the size of
+ // the slice up to the next storage class.
+ // This is what Grow does but we don't call Grow because
+ // that might copy the values twice.
+ s2 := append(s[:i], make(S, n+m-i)...)
+ copy(s2[i:], v)
+ copy(s2[i+m:], s[i:])
+ return s2
+ }
+ s = s[:n+m]
+
+ // before:
+ // s: aaaaaaaabbbbccccccccdddd
+ // ^ ^ ^ ^
+ // i i+m n n+m
+ // after:
+ // s: aaaaaaaavvvvbbbbcccccccc
+ // ^ ^ ^ ^
+ // i i+m n n+m
+ //
+ // a are the values that don't move in s.
+ // v are the values copied in from v.
+ // b and c are the values from s that are shifted up in index.
+ // d are the values that get overwritten, never to be seen again.
+
+ if !overlaps(v, s[i+m:]) {
+ // Easy case - v does not overlap either the c or d regions.
+ // (It might be in some of a or b, or elsewhere entirely.)
+ // The data we copy up doesn't write to v at all, so just do it.
+
+ copy(s[i+m:], s[i:])
+
+ // Now we have
+ // s: aaaaaaaabbbbbbbbcccccccc
+ // ^ ^ ^ ^
+ // i i+m n n+m
+ // Note the b values are duplicated.
+
+ copy(s[i:], v)
+
+ // Now we have
+ // s: aaaaaaaavvvvbbbbcccccccc
+ // ^ ^ ^ ^
+ // i i+m n n+m
+ // That's the result we want.
+ return s
+ }
+
+ // The hard case - v overlaps c or d. We can't just shift up
+ // the data because we'd move or clobber the values we're trying
+ // to insert.
+ // So instead, write v on top of d, then rotate.
+ copy(s[n:], v)
+
+ // Now we have
+ // s: aaaaaaaabbbbccccccccvvvv
+ // ^ ^ ^ ^
+ // i i+m n n+m
+
+ rotateRight(s[i:], m)
+
+ // Now we have
+ // s: aaaaaaaavvvvbbbbcccccccc
+ // ^ ^ ^ ^
+ // i i+m n n+m
+ // That's the result we want.
+ return s
+}
+
+// clearSlice sets all elements up to the length of s to the zero value of E.
+// We may use the builtin clear func instead, and remove clearSlice, when upgrading
+// to Go 1.21+.
+func clearSlice[S ~[]E, E any](s S) {
+ var zero E
+ for i := range s {
+ s[i] = zero
+ }
+}
+
+// Delete removes the elements s[i:j] from s, returning the modified slice.
+// Delete panics if j > len(s) or s[i:j] is not a valid slice of s.
+// Delete is O(len(s)-i), so if many items must be deleted, it is better to
+// make a single call deleting them all together than to delete one at a time.
+// Delete zeroes the elements s[len(s)-(j-i):len(s)].
+func Delete[S ~[]E, E any](s S, i, j int) S {
+ _ = s[i:j:len(s)] // bounds check
+
+ if i == j {
+ return s
+ }
+
+ oldlen := len(s)
+ s = append(s[:i], s[j:]...)
+ clearSlice(s[len(s):oldlen]) // zero/nil out the obsolete elements, for GC
+ return s
+}
+
+// DeleteFunc removes any elements from s for which del returns true,
+// returning the modified slice.
+// DeleteFunc zeroes the elements between the new length and the original length.
+func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S {
+ i := IndexFunc(s, del)
+ if i == -1 {
+ return s
+ }
+ // Don't start copying elements until we find one to delete.
+ for j := i + 1; j < len(s); j++ {
+ if v := s[j]; !del(v) {
+ s[i] = v
+ i++
+ }
+ }
+ clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC
+ return s[:i]
+}
+
+// Replace replaces the elements s[i:j] by the given v, and returns the
+// modified slice. Replace panics if s[i:j] is not a valid slice of s.
+// When len(v) < (j-i), Replace zeroes the elements between the new length and the original length.
+func Replace[S ~[]E, E any](s S, i, j int, v ...E) S {
+ _ = s[i:j] // verify that i:j is a valid subslice
+
+ if i == j {
+ return Insert(s, i, v...)
+ }
+ if j == len(s) {
+ return append(s[:i], v...)
+ }
+
+ tot := len(s[:i]) + len(v) + len(s[j:])
+ if tot > cap(s) {
+ // Too big to fit, allocate and copy over.
+ s2 := append(s[:i], make(S, tot-i)...) // See Insert
+ copy(s2[i:], v)
+ copy(s2[i+len(v):], s[j:])
+ return s2
+ }
+
+ r := s[:tot]
+
+ if i+len(v) <= j {
+ // Easy, as v fits in the deleted portion.
+ copy(r[i:], v)
+ if i+len(v) != j {
+ copy(r[i+len(v):], s[j:])
+ }
+ clearSlice(s[tot:]) // zero/nil out the obsolete elements, for GC
+ return r
+ }
+
+ // We are expanding (v is bigger than j-i).
+ // The situation is something like this:
+ // (example has i=4,j=8,len(s)=16,len(v)=6)
+ // s: aaaaxxxxbbbbbbbbyy
+ // ^ ^ ^ ^
+ // i j len(s) tot
+ // a: prefix of s
+ // x: deleted range
+ // b: more of s
+ // y: area to expand into
+
+ if !overlaps(r[i+len(v):], v) {
+ // Easy, as v is not clobbered by the first copy.
+ copy(r[i+len(v):], s[j:])
+ copy(r[i:], v)
+ return r
+ }
+
+ // This is a situation where we don't have a single place to which
+ // we can copy v. Parts of it need to go to two different places.
+ // We want to copy the prefix of v into y and the suffix into x, then
+ // rotate |y| spots to the right.
+ //
+ // v[2:] v[:2]
+ // | |
+ // s: aaaavvvvbbbbbbbbvv
+ // ^ ^ ^ ^
+ // i j len(s) tot
+ //
+ // If either of those two destinations doesn't alias v, then we're good.
+ y := len(v) - (j - i) // length of y portion
+
+ if !overlaps(r[i:j], v) {
+ copy(r[i:j], v[y:])
+ copy(r[len(s):], v[:y])
+ rotateRight(r[i:], y)
+ return r
+ }
+ if !overlaps(r[len(s):], v) {
+ copy(r[len(s):], v[:y])
+ copy(r[i:j], v[y:])
+ rotateRight(r[i:], y)
+ return r
+ }
+
+ // Now we know that v overlaps both x and y.
+ // That means that the entirety of b is *inside* v.
+ // So we don't need to preserve b at all; instead we
+ // can copy v first, then copy the b part of v out of
+ // v to the right destination.
+ k := startIdx(v, s[j:])
+ copy(r[i:], v)
+ copy(r[i+len(v):], r[i+k:])
+ return r
+}
+
+// Clone returns a copy of the slice.
+// The elements are copied using assignment, so this is a shallow clone.
+func Clone[S ~[]E, E any](s S) S {
+ // Preserve nil in case it matters.
+ if s == nil {
+ return nil
+ }
+ return append(S([]E{}), s...)
+}
+
+// Compact replaces consecutive runs of equal elements with a single copy.
+// This is like the uniq command found on Unix.
+// Compact modifies the contents of the slice s and returns the modified slice,
+// which may have a smaller length.
+// Compact zeroes the elements between the new length and the original length.
+func Compact[S ~[]E, E comparable](s S) S {
+ if len(s) < 2 {
+ return s
+ }
+ i := 1
+ for k := 1; k < len(s); k++ {
+ if s[k] != s[k-1] {
+ if i != k {
+ s[i] = s[k]
+ }
+ i++
+ }
+ }
+ clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC
+ return s[:i]
+}
+
+// CompactFunc is like [Compact] but uses an equality function to compare elements.
+// For runs of elements that compare equal, CompactFunc keeps the first one.
+// CompactFunc zeroes the elements between the new length and the original length.
+func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S {
+ if len(s) < 2 {
+ return s
+ }
+ i := 1
+ for k := 1; k < len(s); k++ {
+ if !eq(s[k], s[k-1]) {
+ if i != k {
+ s[i] = s[k]
+ }
+ i++
+ }
+ }
+ clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC
+ return s[:i]
+}
+
+// Grow increases the slice's capacity, if necessary, to guarantee space for
+// another n elements. After Grow(n), at least n elements can be appended
+// to the slice without another allocation. If n is negative or too large to
+// allocate the memory, Grow panics.
+func Grow[S ~[]E, E any](s S, n int) S {
+ if n < 0 {
+ panic("cannot be negative")
+ }
+ if n -= cap(s) - len(s); n > 0 {
+ // TODO(https://go.dev/issue/53888): We use []E instead of S here to work
+ // around a compiler bug where the runtime.growslice optimization
+ // does not take effect. Revert when the compiler is fixed.
+ s = append([]E(s)[:cap(s)], make([]E, n)...)[:len(s)]
+ }
+ return s
+}
+
+// Clip removes unused capacity from the slice, returning s[:len(s):len(s)].
+func Clip[S ~[]E, E any](s S) S {
+ return s[:len(s):len(s)]
+}
+
+// Rotation algorithm explanation:
+//
+// rotate left by 2
+// start with
+// 0123456789
+// split up like this
+// 01 234567 89
+// swap first 2 and last 2
+// 89 234567 01
+// join first parts
+// 89234567 01
+// recursively rotate first left part by 2
+// 23456789 01
+// join at the end
+// 2345678901
+//
+// rotate left by 8
+// start with
+// 0123456789
+// split up like this
+// 01 234567 89
+// swap first 2 and last 2
+// 89 234567 01
+// join last parts
+// 89 23456701
+// recursively rotate second part left by 6
+// 89 01234567
+// join at the end
+// 8901234567
+
+// TODO: There are other rotate algorithms.
+// This algorithm has the desirable property that it moves each element exactly twice.
+// The triple-reverse algorithm is simpler and more cache friendly, but takes more writes.
+// The follow-cycles algorithm can be 1-write but it is not very cache friendly.
+
+// rotateLeft rotates b left by n spaces.
+// s_final[i] = s_orig[i+r], wrapping around.
+func rotateLeft[E any](s []E, r int) {
+ for r != 0 && r != len(s) {
+ if r*2 <= len(s) {
+ swap(s[:r], s[len(s)-r:])
+ s = s[:len(s)-r]
+ } else {
+ swap(s[:len(s)-r], s[r:])
+ s, r = s[len(s)-r:], r*2-len(s)
+ }
+ }
+}
+func rotateRight[E any](s []E, r int) {
+ rotateLeft(s, len(s)-r)
+}
+
+// swap swaps the contents of x and y. x and y must be equal length and disjoint.
+func swap[E any](x, y []E) {
+ for i := 0; i < len(x); i++ {
+ x[i], y[i] = y[i], x[i]
+ }
+}
+
+// overlaps reports whether the memory ranges a[0:len(a)] and b[0:len(b)] overlap.
+func overlaps[E any](a, b []E) bool {
+ if len(a) == 0 || len(b) == 0 {
+ return false
+ }
+ elemSize := unsafe.Sizeof(a[0])
+ if elemSize == 0 {
+ return false
+ }
+ // TODO: use a runtime/unsafe facility once one becomes available. See issue 12445.
+ // Also see crypto/internal/alias/alias.go:AnyOverlap
+ return uintptr(unsafe.Pointer(&a[0])) <= uintptr(unsafe.Pointer(&b[len(b)-1]))+(elemSize-1) &&
+ uintptr(unsafe.Pointer(&b[0])) <= uintptr(unsafe.Pointer(&a[len(a)-1]))+(elemSize-1)
+}
+
+// startIdx returns the index in haystack where the needle starts.
+// prerequisite: the needle must be aliased entirely inside the haystack.
+func startIdx[E any](haystack, needle []E) int {
+ p := &needle[0]
+ for i := range haystack {
+ if p == &haystack[i] {
+ return i
+ }
+ }
+ // TODO: what if the overlap is by a non-integral number of Es?
+ panic("needle not found")
+}
+
+// Reverse reverses the elements of the slice in place.
+func Reverse[S ~[]E, E any](s S) {
+ for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {
+ s[i], s[j] = s[j], s[i]
+ }
+}
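
A short usage sketch of the exported API in this file; the expected outputs
follow from the doc comments above.

    package main

    import (
        "fmt"

        "golang.org/x/exp/slices"
    )

    func main() {
        s := []int{1, 2, 2, 3}

        s = slices.Insert(s, 1, 9, 9) // [1 9 9 2 2 3]
        s = slices.Delete(s, 1, 3)    // back to [1 2 2 3]
        s = slices.Compact(s)         // equal runs collapse: [1 2 3]

        fmt.Println(slices.Contains(s, 2), slices.Index(s, 3)) // true 2

        slices.Reverse(s)
        fmt.Println(s) // [3 2 1]
    }
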
diff --git a/vendor/golang.org/x/exp/slices/sort.go b/vendor/golang.org/x/exp/slices/sort.go
new file mode 100644
index 000000000..f58bbc7ba
--- /dev/null
+++ b/vendor/golang.org/x/exp/slices/sort.go
@@ -0,0 +1,197 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:generate go run $GOROOT/src/sort/gen_sort_variants.go -exp
+
+package slices
+
+import (
+ "math/bits"
+
+ "golang.org/x/exp/constraints"
+)
+
+// Sort sorts a slice of any ordered type in ascending order.
+// When sorting floating-point numbers, NaNs are ordered before other values.
+func Sort[S ~[]E, E constraints.Ordered](x S) {
+ n := len(x)
+ pdqsortOrdered(x, 0, n, bits.Len(uint(n)))
+}
+
+// SortFunc sorts the slice x in ascending order as determined by the cmp
+// function. This sort is not guaranteed to be stable.
+// cmp(a, b) should return a negative number when a < b, a positive number when
+// a > b and zero when a == b or when a is not comparable to b in the sense
+// of the formal definition of Strict Weak Ordering.
+//
+// SortFunc requires that cmp is a strict weak ordering.
+// See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings.
+// To indicate 'uncomparable', return 0 from the function.
+func SortFunc[S ~[]E, E any](x S, cmp func(a, b E) int) {
+ n := len(x)
+ pdqsortCmpFunc(x, 0, n, bits.Len(uint(n)), cmp)
+}
+
+// SortStableFunc sorts the slice x while keeping the original order of equal
+// elements, using cmp to compare elements in the same way as [SortFunc].
+func SortStableFunc[S ~[]E, E any](x S, cmp func(a, b E) int) {
+ stableCmpFunc(x, len(x), cmp)
+}
+
+// IsSorted reports whether x is sorted in ascending order.
+func IsSorted[S ~[]E, E constraints.Ordered](x S) bool {
+ for i := len(x) - 1; i > 0; i-- {
+ if cmpLess(x[i], x[i-1]) {
+ return false
+ }
+ }
+ return true
+}
+
+// IsSortedFunc reports whether x is sorted in ascending order, with cmp as the
+// comparison function as defined by [SortFunc].
+func IsSortedFunc[S ~[]E, E any](x S, cmp func(a, b E) int) bool {
+ for i := len(x) - 1; i > 0; i-- {
+ if cmp(x[i], x[i-1]) < 0 {
+ return false
+ }
+ }
+ return true
+}
+
+// Min returns the minimal value in x. It panics if x is empty.
+// For floating-point numbers, Min propagates NaNs (any NaN value in x
+// forces the output to be NaN).
+func Min[S ~[]E, E constraints.Ordered](x S) E {
+ if len(x) < 1 {
+ panic("slices.Min: empty list")
+ }
+ m := x[0]
+ for i := 1; i < len(x); i++ {
+ m = min(m, x[i])
+ }
+ return m
+}
+
+// MinFunc returns the minimal value in x, using cmp to compare elements.
+// It panics if x is empty. If there is more than one minimal element
+// according to the cmp function, MinFunc returns the first one.
+func MinFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E {
+ if len(x) < 1 {
+ panic("slices.MinFunc: empty list")
+ }
+ m := x[0]
+ for i := 1; i < len(x); i++ {
+ if cmp(x[i], m) < 0 {
+ m = x[i]
+ }
+ }
+ return m
+}
+
+// Max returns the maximal value in x. It panics if x is empty.
+// For floating-point E, Max propagates NaNs (any NaN value in x
+// forces the output to be NaN).
+func Max[S ~[]E, E constraints.Ordered](x S) E {
+ if len(x) < 1 {
+ panic("slices.Max: empty list")
+ }
+ m := x[0]
+ for i := 1; i < len(x); i++ {
+ m = max(m, x[i])
+ }
+ return m
+}
+
+// MaxFunc returns the maximal value in x, using cmp to compare elements.
+// It panics if x is empty. If there is more than one maximal element
+// according to the cmp function, MaxFunc returns the first one.
+func MaxFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E {
+ if len(x) < 1 {
+ panic("slices.MaxFunc: empty list")
+ }
+ m := x[0]
+ for i := 1; i < len(x); i++ {
+ if cmp(x[i], m) > 0 {
+ m = x[i]
+ }
+ }
+ return m
+}
+
+// BinarySearch searches for target in a sorted slice and returns the position
+// where target is found, or the position where target would appear in the
+// sort order; it also returns a bool saying whether the target is really found
+// in the slice. The slice must be sorted in increasing order.
+func BinarySearch[S ~[]E, E constraints.Ordered](x S, target E) (int, bool) {
+ // Inlining is faster than calling BinarySearchFunc with a lambda.
+ n := len(x)
+ // Define x[-1] < target and x[n] >= target.
+ // Invariant: x[i-1] < target, x[j] >= target.
+ i, j := 0, n
+ for i < j {
+ h := int(uint(i+j) >> 1) // avoid overflow when computing h
+ // i ≤ h < j
+ if cmpLess(x[h], target) {
+ i = h + 1 // preserves x[i-1] < target
+ } else {
+ j = h // preserves x[j] >= target
+ }
+ }
+ // i == j, x[i-1] < target, and x[j] (= x[i]) >= target => answer is i.
+ return i, i < n && (x[i] == target || (isNaN(x[i]) && isNaN(target)))
+}
+
+// BinarySearchFunc works like [BinarySearch], but uses a custom comparison
+// function. The slice must be sorted in increasing order, where "increasing"
+// is defined by cmp. cmp should return 0 if the slice element matches
+// the target, a negative number if the slice element precedes the target,
+// or a positive number if the slice element follows the target.
+// cmp must implement the same ordering as the slice, such that if
+// cmp(a, t) < 0 and cmp(b, t) >= 0, then a must precede b in the slice.
+func BinarySearchFunc[S ~[]E, E, T any](x S, target T, cmp func(E, T) int) (int, bool) {
+ n := len(x)
+ // Define cmp(x[-1], target) < 0 and cmp(x[n], target) >= 0 .
+ // Invariant: cmp(x[i - 1], target) < 0, cmp(x[j], target) >= 0.
+ i, j := 0, n
+ for i < j {
+ h := int(uint(i+j) >> 1) // avoid overflow when computing h
+ // i ≤ h < j
+ if cmp(x[h], target) < 0 {
+ i = h + 1 // preserves cmp(x[i - 1], target) < 0
+ } else {
+ j = h // preserves cmp(x[j], target) >= 0
+ }
+ }
+ // i == j, cmp(x[i-1], target) < 0, and cmp(x[j], target) (= cmp(x[i], target)) >= 0 => answer is i.
+ return i, i < n && cmp(x[i], target) == 0
+}
+
+type sortedHint int // hint for pdqsort when choosing the pivot
+
+const (
+ unknownHint sortedHint = iota
+ increasingHint
+ decreasingHint
+)
+
+// xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf
+type xorshift uint64
+
+func (r *xorshift) Next() uint64 {
+ *r ^= *r << 13
+ *r ^= *r >> 17
+ *r ^= *r << 5
+ return uint64(*r)
+}
+
+func nextPowerOfTwo(length int) uint {
+ return 1 << bits.Len(uint(length))
+}
+
+// isNaN reports whether x is a NaN without requiring the math package.
+// This will always return false if T is not floating-point.
+func isNaN[T constraints.Ordered](x T) bool {
+ return x != x
+}
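
Sort and BinarySearch are typically used together. A brief sketch; the sample
strings are arbitrary.

    package main

    import (
        "fmt"

        "golang.org/x/exp/slices"
    )

    func main() {
        s := []string{"daemon", "manager", "agent"}

        slices.Sort(s) // [agent daemon manager]

        // BinarySearch needs a sorted slice; it returns the position where
        // the target is (or would be inserted) plus a found flag.
        i, found := slices.BinarySearch(s, "daemon")
        fmt.Println(i, found) // 1 true

        i, found = slices.BinarySearch(s, "broker")
        fmt.Println(i, found) // 1 false
    }
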
diff --git a/vendor/golang.org/x/exp/slices/zsortanyfunc.go b/vendor/golang.org/x/exp/slices/zsortanyfunc.go
new file mode 100644
index 000000000..06f2c7a24
--- /dev/null
+++ b/vendor/golang.org/x/exp/slices/zsortanyfunc.go
@@ -0,0 +1,479 @@
+// Code generated by gen_sort_variants.go; DO NOT EDIT.
+
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slices
+
+// insertionSortCmpFunc sorts data[a:b] using insertion sort.
+func insertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
+ for i := a + 1; i < b; i++ {
+ for j := i; j > a && (cmp(data[j], data[j-1]) < 0); j-- {
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+}
+
+// siftDownCmpFunc implements the heap property on data[lo:hi].
+// first is an offset into the array where the root of the heap lies.
+func siftDownCmpFunc[E any](data []E, lo, hi, first int, cmp func(a, b E) int) {
+ root := lo
+ for {
+ child := 2*root + 1
+ if child >= hi {
+ break
+ }
+ if child+1 < hi && (cmp(data[first+child], data[first+child+1]) < 0) {
+ child++
+ }
+ if !(cmp(data[first+root], data[first+child]) < 0) {
+ return
+ }
+ data[first+root], data[first+child] = data[first+child], data[first+root]
+ root = child
+ }
+}
+
+func heapSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
+ first := a
+ lo := 0
+ hi := b - a
+
+ // Build heap with greatest element at top.
+ for i := (hi - 1) / 2; i >= 0; i-- {
+ siftDownCmpFunc(data, i, hi, first, cmp)
+ }
+
+ // Pop elements, largest first, into end of data.
+ for i := hi - 1; i >= 0; i-- {
+ data[first], data[first+i] = data[first+i], data[first]
+ siftDownCmpFunc(data, lo, i, first, cmp)
+ }
+}
+
+// pdqsortCmpFunc sorts data[a:b].
+// The algorithm is based on pattern-defeating quicksort (pdqsort), but without the optimizations from BlockQuicksort.
+// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf
+// C++ implementation: https://github.com/orlp/pdqsort
+// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/
+// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort.
+func pdqsortCmpFunc[E any](data []E, a, b, limit int, cmp func(a, b E) int) {
+ const maxInsertion = 12
+
+ var (
+ wasBalanced = true // whether the last partitioning was reasonably balanced
+ wasPartitioned = true // whether the slice was already partitioned
+ )
+
+ for {
+ length := b - a
+
+ if length <= maxInsertion {
+ insertionSortCmpFunc(data, a, b, cmp)
+ return
+ }
+
+ // Fall back to heapsort if too many bad choices were made.
+ if limit == 0 {
+ heapSortCmpFunc(data, a, b, cmp)
+ return
+ }
+
+ // If the last partitioning was imbalanced, we need to break patterns.
+ if !wasBalanced {
+ breakPatternsCmpFunc(data, a, b, cmp)
+ limit--
+ }
+
+ pivot, hint := choosePivotCmpFunc(data, a, b, cmp)
+ if hint == decreasingHint {
+ reverseRangeCmpFunc(data, a, b, cmp)
+ // The chosen pivot was pivot-a elements after the start of the array.
+ // After reversing it is pivot-a elements before the end of the array.
+ // The idea came from Rust's implementation.
+ pivot = (b - 1) - (pivot - a)
+ hint = increasingHint
+ }
+
+ // The slice is likely already sorted.
+ if wasBalanced && wasPartitioned && hint == increasingHint {
+ if partialInsertionSortCmpFunc(data, a, b, cmp) {
+ return
+ }
+ }
+
+ // The slice probably contains many duplicate elements; partition it into
+ // elements equal to and elements greater than the pivot.
+ if a > 0 && !(cmp(data[a-1], data[pivot]) < 0) {
+ mid := partitionEqualCmpFunc(data, a, b, pivot, cmp)
+ a = mid
+ continue
+ }
+
+ mid, alreadyPartitioned := partitionCmpFunc(data, a, b, pivot, cmp)
+ wasPartitioned = alreadyPartitioned
+
+ leftLen, rightLen := mid-a, b-mid
+ balanceThreshold := length / 8
+ if leftLen < rightLen {
+ wasBalanced = leftLen >= balanceThreshold
+ pdqsortCmpFunc(data, a, mid, limit, cmp)
+ a = mid + 1
+ } else {
+ wasBalanced = rightLen >= balanceThreshold
+ pdqsortCmpFunc(data, mid+1, b, limit, cmp)
+ b = mid
+ }
+ }
+}
+
+// partitionCmpFunc does one quicksort partition.
+// Let p = data[pivot]
+// Moves elements in data[a:b] around, so that data[i]<p for i<newpivot
+// and data[j]>=p for j>newpivot.
+// On return, data[newpivot] = p
+func partitionCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int, alreadyPartitioned bool) {
+ data[a], data[pivot] = data[pivot], data[a]
+ i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+ for i <= j && (cmp(data[i], data[a]) < 0) {
+ i++
+ }
+ for i <= j && !(cmp(data[j], data[a]) < 0) {
+ j--
+ }
+ if i > j {
+ data[j], data[a] = data[a], data[j]
+ return j, true
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+
+ for {
+ for i <= j && (cmp(data[i], data[a]) < 0) {
+ i++
+ }
+ for i <= j && !(cmp(data[j], data[a]) < 0) {
+ j--
+ }
+ if i > j {
+ break
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+ data[j], data[a] = data[a], data[j]
+ return j, false
+}
+
+// partitionEqualCmpFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
+// It assumes that data[a:b] does not contain elements smaller than data[pivot].
+func partitionEqualCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int) {
+ data[a], data[pivot] = data[pivot], data[a]
+ i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+ for {
+ for i <= j && !(cmp(data[a], data[i]) < 0) {
+ i++
+ }
+ for i <= j && (cmp(data[a], data[j]) < 0) {
+ j--
+ }
+ if i > j {
+ break
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+ return i
+}
+
+// partialInsertionSortCmpFunc partially sorts a slice and returns true if the slice is sorted at the end.
+func partialInsertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) bool {
+ const (
+ maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted
+ shortestShifting = 50 // don't shift any elements on short arrays
+ )
+ i := a + 1
+ for j := 0; j < maxSteps; j++ {
+ for i < b && !(cmp(data[i], data[i-1]) < 0) {
+ i++
+ }
+
+ if i == b {
+ return true
+ }
+
+ if b-a < shortestShifting {
+ return false
+ }
+
+ data[i], data[i-1] = data[i-1], data[i]
+
+ // Shift the smaller one to the left.
+ if i-a >= 2 {
+ for j := i - 1; j >= 1; j-- {
+ if !(cmp(data[j], data[j-1]) < 0) {
+ break
+ }
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+ // Shift the greater one to the right.
+ if b-i >= 2 {
+ for j := i + 1; j < b; j++ {
+ if !(cmp(data[j], data[j-1]) < 0) {
+ break
+ }
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+ }
+ return false
+}
+
+// breakPatternsCmpFunc scatters some elements around in an attempt to break some patterns
+// that might cause imbalanced partitions in quicksort.
+func breakPatternsCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
+ length := b - a
+ if length >= 8 {
+ random := xorshift(length)
+ modulus := nextPowerOfTwo(length)
+
+ for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
+ other := int(uint(random.Next()) & (modulus - 1))
+ if other >= length {
+ other -= length
+ }
+ data[idx], data[a+other] = data[a+other], data[idx]
+ }
+ }
+}
+
+// choosePivotCmpFunc chooses a pivot in data[a:b].
+//
+// [0,8): chooses a static pivot.
+// [8,shortestNinther): uses the simple median-of-three method.
+// [shortestNinther,∞): uses the Tukey ninther method.
+func choosePivotCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) (pivot int, hint sortedHint) {
+ const (
+ shortestNinther = 50
+ maxSwaps = 4 * 3
+ )
+
+ l := b - a
+
+ var (
+ swaps int
+ i = a + l/4*1
+ j = a + l/4*2
+ k = a + l/4*3
+ )
+
+ if l >= 8 {
+ if l >= shortestNinther {
+ // Tukey ninther method, the idea came from Rust's implementation.
+ i = medianAdjacentCmpFunc(data, i, &swaps, cmp)
+ j = medianAdjacentCmpFunc(data, j, &swaps, cmp)
+ k = medianAdjacentCmpFunc(data, k, &swaps, cmp)
+ }
+ // Find the median among i, j, k and store it into j.
+ j = medianCmpFunc(data, i, j, k, &swaps, cmp)
+ }
+
+ switch swaps {
+ case 0:
+ return j, increasingHint
+ case maxSwaps:
+ return j, decreasingHint
+ default:
+ return j, unknownHint
+ }
+}
+
+// order2CmpFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
+func order2CmpFunc[E any](data []E, a, b int, swaps *int, cmp func(a, b E) int) (int, int) {
+ if cmp(data[b], data[a]) < 0 {
+ *swaps++
+ return b, a
+ }
+ return a, b
+}
+
+// medianCmpFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c.
+func medianCmpFunc[E any](data []E, a, b, c int, swaps *int, cmp func(a, b E) int) int {
+ a, b = order2CmpFunc(data, a, b, swaps, cmp)
+ b, c = order2CmpFunc(data, b, c, swaps, cmp)
+ a, b = order2CmpFunc(data, a, b, swaps, cmp)
+ return b
+}
+
+// medianAdjacentCmpFunc finds the median of data[a - 1], data[a], data[a + 1] and returns its index.
+func medianAdjacentCmpFunc[E any](data []E, a int, swaps *int, cmp func(a, b E) int) int {
+ return medianCmpFunc(data, a-1, a, a+1, swaps, cmp)
+}
+
+func reverseRangeCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
+ i := a
+ j := b - 1
+ for i < j {
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+}
+
+func swapRangeCmpFunc[E any](data []E, a, b, n int, cmp func(a, b E) int) {
+ for i := 0; i < n; i++ {
+ data[a+i], data[b+i] = data[b+i], data[a+i]
+ }
+}
+
+func stableCmpFunc[E any](data []E, n int, cmp func(a, b E) int) {
+ blockSize := 20 // must be > 0
+ a, b := 0, blockSize
+ for b <= n {
+ insertionSortCmpFunc(data, a, b, cmp)
+ a = b
+ b += blockSize
+ }
+ insertionSortCmpFunc(data, a, n, cmp)
+
+ for blockSize < n {
+ a, b = 0, 2*blockSize
+ for b <= n {
+ symMergeCmpFunc(data, a, a+blockSize, b, cmp)
+ a = b
+ b += 2 * blockSize
+ }
+ if m := a + blockSize; m < n {
+ symMergeCmpFunc(data, a, m, n, cmp)
+ }
+ blockSize *= 2
+ }
+}
+
+// symMergeCmpFunc merges the two sorted subsequences data[a:m] and data[m:b] using
+// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
+// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
+// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
+// Computer Science, pages 714-723. Springer, 2004.
+//
+// Let M = m-a and N = b-m. Without loss of generality, assume M < N.
+// The recursion depth is bounded by ceil(log(N+M)).
+// The algorithm needs O(M*log(N/M + 1)) calls to data.Less.
+// The algorithm needs O((M+N)*log(M)) calls to data.Swap.
+//
+// The paper gives O((M+N)*log(M)) as the number of assignments assuming a
+// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation
+// in the paper carries through for Swap operations, especially as the block
+// swapping rotate uses only O(M+N) Swaps.
+//
+// symMerge assumes non-degenerate arguments: a < m && m < b.
+// Having the caller check this condition eliminates many leaf recursion calls,
+// which improves performance.
+func symMergeCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) {
+ // Avoid unnecessary recursions of symMerge
+ // by direct insertion of data[a] into data[m:b]
+ // if data[a:m] only contains one element.
+ if m-a == 1 {
+ // Use binary search to find the lowest index i
+ // such that data[i] >= data[a] for m <= i < b.
+ // Exit the search loop with i == b in case no such index exists.
+ i := m
+ j := b
+ for i < j {
+ h := int(uint(i+j) >> 1)
+ if cmp(data[h], data[a]) < 0 {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ // Swap values until data[a] reaches the position before i.
+ for k := a; k < i-1; k++ {
+ data[k], data[k+1] = data[k+1], data[k]
+ }
+ return
+ }
+
+ // Avoid unnecessary recursions of symMerge
+ // by direct insertion of data[m] into data[a:m]
+ // if data[m:b] only contains one element.
+ if b-m == 1 {
+ // Use binary search to find the lowest index i
+ // such that data[i] > data[m] for a <= i < m.
+ // Exit the search loop with i == m in case no such index exists.
+ i := a
+ j := m
+ for i < j {
+ h := int(uint(i+j) >> 1)
+ if !(cmp(data[m], data[h]) < 0) {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ // Swap values until data[m] reaches the position i.
+ for k := m; k > i; k-- {
+ data[k], data[k-1] = data[k-1], data[k]
+ }
+ return
+ }
+
+ mid := int(uint(a+b) >> 1)
+ n := mid + m
+ var start, r int
+ if m > mid {
+ start = n - b
+ r = mid
+ } else {
+ start = a
+ r = m
+ }
+ p := n - 1
+
+ for start < r {
+ c := int(uint(start+r) >> 1)
+ if !(cmp(data[p-c], data[c]) < 0) {
+ start = c + 1
+ } else {
+ r = c
+ }
+ }
+
+ end := n - start
+ if start < m && m < end {
+ rotateCmpFunc(data, start, m, end, cmp)
+ }
+ if a < start && start < mid {
+ symMergeCmpFunc(data, a, start, mid, cmp)
+ }
+ if mid < end && end < b {
+ symMergeCmpFunc(data, mid, end, b, cmp)
+ }
+}
+
+// rotateCmpFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
+// Data of the form 'x u v y' is changed to 'x v u y'.
+// rotate performs at most b-a many calls to data.Swap,
+// and it assumes non-degenerate arguments: a < m && m < b.
+func rotateCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) {
+ i := m - a
+ j := b - m
+
+ for i != j {
+ if i > j {
+ swapRangeCmpFunc(data, m-i, m, j, cmp)
+ i -= j
+ } else {
+ swapRangeCmpFunc(data, m-i, m+j-i, i, cmp)
+ j -= i
+ }
+ }
+ // i == j
+ swapRangeCmpFunc(data, m-i, m, i, cmp)
+}
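
These generated routines back SortFunc and SortStableFunc; the practical
difference between the two is stability. A sketch (the rule type and sample
values are illustrative):

    package main

    import (
        "fmt"

        "golang.org/x/exp/slices"
    )

    type rule struct {
        prio int
        name string
    }

    func main() {
        rules := []rule{{2, "icmp"}, {1, "tcp"}, {2, "udp"}, {1, "sctp"}}

        // SortStableFunc (insertion sort on blocks, then SymMerge, per the
        // stable*/symMerge* code above) preserves the input order of
        // elements that compare equal.
        slices.SortStableFunc(rules, func(a, b rule) int {
            return a.prio - b.prio
        })

        fmt.Println(rules) // [{1 tcp} {1 sctp} {2 icmp} {2 udp}]
    }
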
diff --git a/vendor/golang.org/x/exp/slices/zsortordered.go b/vendor/golang.org/x/exp/slices/zsortordered.go
new file mode 100644
index 000000000..99b47c398
--- /dev/null
+++ b/vendor/golang.org/x/exp/slices/zsortordered.go
@@ -0,0 +1,481 @@
+// Code generated by gen_sort_variants.go; DO NOT EDIT.
+
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slices
+
+import "golang.org/x/exp/constraints"
+
+// insertionSortOrdered sorts data[a:b] using insertion sort.
+func insertionSortOrdered[E constraints.Ordered](data []E, a, b int) {
+ for i := a + 1; i < b; i++ {
+ for j := i; j > a && cmpLess(data[j], data[j-1]); j-- {
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+}
+
+// siftDownOrdered implements the heap property on data[lo:hi].
+// first is an offset into the array where the root of the heap lies.
+func siftDownOrdered[E constraints.Ordered](data []E, lo, hi, first int) {
+ root := lo
+ for {
+ child := 2*root + 1
+ if child >= hi {
+ break
+ }
+ if child+1 < hi && cmpLess(data[first+child], data[first+child+1]) {
+ child++
+ }
+ if !cmpLess(data[first+root], data[first+child]) {
+ return
+ }
+ data[first+root], data[first+child] = data[first+child], data[first+root]
+ root = child
+ }
+}
+
+func heapSortOrdered[E constraints.Ordered](data []E, a, b int) {
+ first := a
+ lo := 0
+ hi := b - a
+
+ // Build heap with greatest element at top.
+ for i := (hi - 1) / 2; i >= 0; i-- {
+ siftDownOrdered(data, i, hi, first)
+ }
+
+ // Pop elements, largest first, into end of data.
+ for i := hi - 1; i >= 0; i-- {
+ data[first], data[first+i] = data[first+i], data[first]
+ siftDownOrdered(data, lo, i, first)
+ }
+}
+
+// pdqsortOrdered sorts data[a:b].
+// The algorithm is based on pattern-defeating quicksort (pdqsort), but without the optimizations from BlockQuicksort.
+// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf
+// C++ implementation: https://github.com/orlp/pdqsort
+// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/
+// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort.
+func pdqsortOrdered[E constraints.Ordered](data []E, a, b, limit int) {
+ const maxInsertion = 12
+
+ var (
+ wasBalanced = true // whether the last partitioning was reasonably balanced
+ wasPartitioned = true // whether the slice was already partitioned
+ )
+
+ for {
+ length := b - a
+
+ if length <= maxInsertion {
+ insertionSortOrdered(data, a, b)
+ return
+ }
+
+ // Fall back to heapsort if too many bad choices were made.
+ if limit == 0 {
+ heapSortOrdered(data, a, b)
+ return
+ }
+
+ // If the last partitioning was imbalanced, we need to break patterns.
+ if !wasBalanced {
+ breakPatternsOrdered(data, a, b)
+ limit--
+ }
+
+ pivot, hint := choosePivotOrdered(data, a, b)
+ if hint == decreasingHint {
+ reverseRangeOrdered(data, a, b)
+ // The chosen pivot was pivot-a elements after the start of the array.
+ // After reversing it is pivot-a elements before the end of the array.
+ // The idea came from Rust's implementation.
+ pivot = (b - 1) - (pivot - a)
+ hint = increasingHint
+ }
+
+ // The slice is likely already sorted.
+ if wasBalanced && wasPartitioned && hint == increasingHint {
+ if partialInsertionSortOrdered(data, a, b) {
+ return
+ }
+ }
+
+ // The slice probably contains many duplicate elements; partition it into
+ // elements equal to and elements greater than the pivot.
+ if a > 0 && !cmpLess(data[a-1], data[pivot]) {
+ mid := partitionEqualOrdered(data, a, b, pivot)
+ a = mid
+ continue
+ }
+
+ mid, alreadyPartitioned := partitionOrdered(data, a, b, pivot)
+ wasPartitioned = alreadyPartitioned
+
+ leftLen, rightLen := mid-a, b-mid
+ balanceThreshold := length / 8
+ if leftLen < rightLen {
+ wasBalanced = leftLen >= balanceThreshold
+ pdqsortOrdered(data, a, mid, limit)
+ a = mid + 1
+ } else {
+ wasBalanced = rightLen >= balanceThreshold
+ pdqsortOrdered(data, mid+1, b, limit)
+ b = mid
+ }
+ }
+}
+
+// partitionOrdered does one quicksort partition.
+// Let p = data[pivot]
+// Moves elements in data[a:b] around, so that data[i]